Merge git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core-2.6: (27 commits)
  Driver core: fix race in dev_driver_string
  Driver Core: Early platform driver buffer
  sysfs: sysfs_setattr remove unnecessary permission check.
  sysfs: Factor out sysfs_rename from sysfs_rename_dir and sysfs_move_dir
  sysfs: Propagate renames to the vfs on demand
  sysfs: Gut sysfs_addrm_start and sysfs_addrm_finish
  sysfs: In sysfs_chmod_file lazily propagate the mode change.
  sysfs: Implement sysfs_getattr & sysfs_permission
  sysfs: Nicely indent sysfs_symlink_inode_operations
  sysfs: Update s_iattr on link and unlink.
  sysfs: Fix locking and factor out sysfs_sd_setattr
  sysfs: Simplify iattr time assignments
  sysfs: Simplify sysfs_chmod_file semantics
  sysfs: Use dentry_ops instead of directly playing with the dcache
  sysfs: Rename sysfs_d_iput to sysfs_dentry_iput
  sysfs: Update sysfs_setxattr so it updates secdata under the sysfs_mutex
  debugfs: fix create mutex racy fops and private data
  Driver core: Don't remove kobjects in device_shutdown.
  firmware_class: make request_firmware_nowait more useful
  Driver-Core: devtmpfs - set root directory mode to 0755
  ...
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index 7772928..deb6b48 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -144,3 +144,16 @@
 
 		Write a 1 to force the device to disconnect
 		(equivalent to unplugging a wired USB device).
+
+What:		/sys/bus/usb/drivers/.../remove_id
+Date:		November 2009
+Contact:	CHENG Renquan <rqcheng@smu.edu.sg>
+Description:
+		Writing a device ID to this file will remove an ID
+		that was dynamically added via the new_id sysfs entry.
+		The format for the device ID is:
+		idVendor idProduct.	After successfully
+		removing an ID, the driver will no longer support the
+		device.  This is useful to ensure auto probing won't
+		match the driver to the device.  For example:
+		# echo "046d c315" > /sys/bus/usb/drivers/foo/remove_id
diff --git a/Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc b/Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc
index 4e8106f..25b1e75 100644
--- a/Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc
+++ b/Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc
@@ -23,3 +23,16 @@
                 Since this relates to security (specifically, the
                 lifetime of PTKs and GTKs) it should not be changed
                 from the default.
+
+What:           /sys/class/uwb_rc/uwbN/wusbhc/wusb_phy_rate
+Date:           August 2009
+KernelVersion:  2.6.32
+Contact:        David Vrabel <david.vrabel@csr.com>
+Description:
+                The maximum PHY rate to use for all connected devices.
+                This is only of limited use for testing and
+                development as the hardware's automatic rate
+                adaptation is better than this simple control.
+
+                Refer to [ECMA-368] section 10.3.1.1 for the value to
+                use.
diff --git a/Documentation/arm/OMAP/DSS b/Documentation/arm/OMAP/DSS
new file mode 100644
index 0000000..0af0e9e
--- /dev/null
+++ b/Documentation/arm/OMAP/DSS
@@ -0,0 +1,317 @@
+OMAP2/3 Display Subsystem
+-------------------------
+
+This is an almost total rewrite of the OMAP FB driver in drivers/video/omap
+(let's call it DSS1). The main differences between DSS1 and DSS2 are DSI,
+TV-out and multiple display support, but there are also lots of small
+improvements.
+
+The DSS2 driver (omapdss module) is in arch/arm/plat-omap/dss/, and the FB,
+panel and controller drivers are in drivers/video/omap2/. DSS1 and DSS2
+currently live side by side; you can choose which one to use.
+
+Features
+--------
+
+Working and tested features include:
+
+- MIPI DPI (parallel) output
+- MIPI DSI output in command mode
+- MIPI DBI (RFBI) output
+- SDI output
+- TV output
+- All pieces can be compiled as a module or into the kernel
+- Use DISPC to update any of the outputs
+- Use CPU to update RFBI or DSI output
+- OMAP DISPC planes
+- RGB16, RGB24 packed, RGB24 unpacked
+- YUV2, UYVY
+- Scaling
+- Adjusting DSS FCK to find a good pixel clock
+- Use DSI DPLL to create DSS FCK
+
+Tested boards include:
+- OMAP3 SDP board
+- Beagle board
+- N810
+
+omapdss driver
+--------------
+
+Unlike the current drivers, the DSS driver itself has no support for the
+Linux framebuffer, V4L or similar interfaces; instead, it has an internal
+kernel API that upper level drivers can use.
+
+The DSS driver models OMAP's overlays, overlay managers and displays in a
+flexible way to enable uncommon multi-display configurations. In addition to
+modelling the hardware overlays, omapdss supports virtual overlays and overlay
+managers. These can be used when updating a display with the CPU or system DMA.
+
+Panel and controller drivers
+----------------------------
+
+The drivers implement panel or controller specific functionality and are
+not usually visible to users except through the omapfb driver.  They
+register themselves with the DSS driver.
+
+omapfb driver
+-------------
+
+The omapfb driver implements an arbitrary number of standard Linux
+framebuffers. These framebuffers can be routed flexibly to any overlays,
+allowing a very dynamic display architecture.
+
+The driver exports some omapfb specific ioctls, which are compatible with the
+ioctls in the old driver.
+
+The rest of the non-standard features are exported via sysfs. Whether the
+final implementation will use sysfs or ioctls is still open.
+
+V4L2 drivers
+------------
+
+V4L2 support is being implemented at TI.
+
+From the omapdss point of view, the V4L2 drivers should be similar to the
+framebuffer driver.
+
+Architecture
+------------
+
+Some clarification of what the different components do:
+
+    - A framebuffer is a memory area inside OMAP's SRAM/SDRAM that contains
+      the pixel data for the image. A framebuffer has a width, a height and
+      a color depth.
+    - An overlay defines where the pixels are read from and where they go on
+      the screen. The overlay may be smaller than the framebuffer, thus
+      displaying only part of the framebuffer. The position of the overlay
+      may be changed if the overlay is smaller than the display.
+    - An overlay manager combines the overlays into one image and feeds it
+      to a display.
+    - A display is the actual physical display device.
+
+A framebuffer can be connected to multiple overlays to show the same pixel
+data on all of them. Note that in this case the overlay input sizes must be
+the same, but, in the case of video overlays, the output sizes can differ.
+Any framebuffer can be connected to any overlay.
+
+An overlay can be connected to one overlay manager. DISPC overlays can be
+connected only to DISPC overlay managers, and virtual overlays only to
+virtual overlay managers.
+
+An overlay manager can be connected to one display. There are certain
+restrictions on which kinds of displays an overlay manager can be
+connected to:
+
+    - The DISPC TV overlay manager can be connected only to the TV display.
+    - Virtual overlay managers can be connected only to DBI or DSI displays.
+    - The DISPC LCD overlay manager can be connected to all displays except
+      the TV display.
+
+Sysfs
+-----
+The sysfs interface is mainly used for testing. I don't think the sysfs
+interface is the best one for this in the final version, but I don't quite
+know what the best interfaces for these things would be.
+
+The sysfs interface is divided into two parts: DSS and FB.
+
+/sys/class/graphics/fb? directory:
+mirror		0=off, 1=on
+rotate		Rotation 0-3 for 0, 90, 180, 270 degrees
+rotate_type	0 = DMA rotation, 1 = VRFB rotation
+overlays	List of overlay numbers to which framebuffer pixels go
+phys_addr	Physical address of the framebuffer
+virt_addr	Virtual address of the framebuffer
+size		Size of the framebuffer
+
+/sys/devices/platform/omapdss/overlay? directory:
+enabled		0=off, 1=on
+input_size	width,height (i.e. the framebuffer size)
+manager		Destination overlay manager name
+name
+output_size	width,height
+position	x,y
+screen_width	width
+global_alpha	global alpha 0-255 (0=transparent, 255=opaque)
+
+/sys/devices/platform/omapdss/manager? directory:
+display				Destination display
+name
+alpha_blending_enabled		0=off, 1=on
+trans_key_enabled		0=off, 1=on
+trans_key_type			gfx-destination, video-source
+trans_key_value			transparency color key (RGB24)
+default_color			default background color (RGB24)
+
+/sys/devices/platform/omapdss/display? directory:
+ctrl_name	Controller name
+mirror		0=off, 1=on
+update_mode	0=off, 1=auto, 2=manual
+enabled		0=off, 1=on
+name
+rotate		Rotation 0-3 for 0, 90, 180, 270 degrees
+timings		Display timings (pixclock,xres/hfp/hbp/hsw,yres/vfp/vbp/vsw)
+		When writing, two special timings are accepted for tv-out:
+		"pal" and "ntsc" (see the example below)
+panel_name
+tear_elim	Tearing elimination 0=off, 1=on
+
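+A hypothetical example of switching the TV out to PAL timings (display2 is
+the TV display in the examples below):
+
+# echo "pal" > /sys/devices/platform/omapdss/display2/timings
+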
+There are also some debugfs files at <debugfs>/omapdss/ which show information
+about clocks and registers.
+
+Examples
+--------
+
+The following definitions have been made for the examples below:
+
+ovl0=/sys/devices/platform/omapdss/overlay0
+ovl1=/sys/devices/platform/omapdss/overlay1
+ovl2=/sys/devices/platform/omapdss/overlay2
+
+mgr0=/sys/devices/platform/omapdss/manager0
+mgr1=/sys/devices/platform/omapdss/manager1
+
+lcd=/sys/devices/platform/omapdss/display0
+dvi=/sys/devices/platform/omapdss/display1
+tv=/sys/devices/platform/omapdss/display2
+
+fb0=/sys/class/graphics/fb0
+fb1=/sys/class/graphics/fb1
+fb2=/sys/class/graphics/fb2
+
+Default setup on OMAP3 SDP
+--------------------------
+
+Here's the default setup on the OMAP3 SDP board. All planes go to the LCD;
+DVI and TV-out are not in use. The columns from left to right are:
+framebuffers, overlays, overlay managers, displays. Framebuffers are
+handled by omapfb, and the rest by the DSS.
+
+FB0 --- GFX  -\            DVI
+FB1 --- VID1 --+- LCD ---- LCD
+FB2 --- VID2 -/   TV ----- TV
+
+Example: Switch from LCD to DVI
+-------------------------------
+
+w=`cat $dvi/timings | cut -d "," -f 2 | cut -d "/" -f 1`
+h=`cat $dvi/timings | cut -d "," -f 3 | cut -d "/" -f 1`
+
+echo "0" > $lcd/enabled
+echo "" > $mgr0/display
+fbset -fb /dev/fb0 -xres $w -yres $h -vxres $w -vyres $h
+# at this point you have to switch the dvi/lcd dip-switch from the omap board
+echo "dvi" > $mgr0/display
+echo "1" > $dvi/enabled
+
+After this the configuration looks like:
+
+FB0 --- GFX  -\         -- DVI
+FB1 --- VID1 --+- LCD -/   LCD
+FB2 --- VID2 -/   TV ----- TV
+
+Example: Clone GFX overlay to LCD and TV
+----------------------------------------
+
+w=`cat $tv/timings | cut -d "," -f 2 | cut -d "/" -f 1`
+h=`cat $tv/timings | cut -d "," -f 3 | cut -d "/" -f 1`
+
+echo "0" > $ovl0/enabled
+echo "0" > $ovl1/enabled
+
+echo "" > $fb1/overlays
+echo "0,1" > $fb0/overlays
+
+echo "$w,$h" > $ovl1/output_size
+echo "tv" > $ovl1/manager
+
+echo "1" > $ovl0/enabled
+echo "1" > $ovl1/enabled
+
+echo "1" > $tv/enabled
+
+After this the configuration looks like (only relevant parts shown):
+
+FB0 +-- GFX  ---- LCD ---- LCD
+     \- VID1 ---- TV  ---- TV
+
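+Example: Move and resize a video overlay
+----------------------------------------
+
+A minimal sketch using the overlay attributes described above; the position
+and output size values are arbitrary examples. Output scaling only works on
+video overlays such as VID1.
+
+echo "0" > $ovl1/enabled
+# position and output_size values below are arbitrary
+echo "10,20" > $ovl1/position
+echo "400,300" > $ovl1/output_size
+echo "1" > $ovl1/enabled
+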
+Misc notes
+----------
+
+OMAP FB allocates the framebuffer memory using the OMAP VRAM allocator.
+
+Using the DSI DPLL to generate the pixel clock, it is possible to produce
+a pixel clock of 86.5MHz (the maximum possible), and with that you get
+1280x1024@57 output from DVI.
+
+Rotation and mirroring currently only support the RGB565 and RGB8888 modes.
+VRFB does not support mirroring.
+
+VRFB rotation requires much more memory than a non-rotated framebuffer, so
+you probably need to increase your vram setting before using VRFB rotation.
+Also,
+many applications may not work with VRFB if they do not pay attention to all
+framebuffer parameters.
+
+Kernel boot arguments
+---------------------
+
+vram=<size>
+	- Amount of total VRAM to preallocate. For example, "10M". omapfb
+	  allocates memory for framebuffers from VRAM.
+
+omapfb.mode=<display>:<mode>[,...]
+	- Default video mode for specified displays. For example,
+	  "dvi:800x400MR-24@60".  See drivers/video/modedb.c.
+	  There are also two special modes, "pal" and "ntsc", that
+	  can be used for TV out.
+
+omapfb.vram=<fbnum>:<size>[@<physaddr>][,...]
+	- VRAM allocated for a framebuffer. Normally omapfb allocates vram
+	  depending on the display size. With this you can manually allocate
+	  more or define the physical address of each framebuffer. For example,
+	  "1:4M" to allocate 4M for fb1.
+
+omapfb.debug=<y|n>
+	- Enable debug printing. You have to have OMAPFB debug support enabled
+	  in kernel config.
+
+omapfb.test=<y|n>
+	- Draw test pattern to framebuffer whenever framebuffer settings change.
+	  You need to have OMAPFB debug support enabled in kernel config.
+
+omapfb.vrfb=<y|n>
+	- Use VRFB rotation for all framebuffers.
+
+omapfb.rotate=<angle>
+	- Default rotation applied to all framebuffers.
+	  0 - 0 degree rotation
+	  1 - 90 degree rotation
+	  2 - 180 degree rotation
+	  3 - 270 degree rotation
+
+omapfb.mirror=<y|n>
+	- Default mirror for all framebuffers. Only works with DMA rotation.
+
+omapdss.def_disp=<display>
+	- Name of default display, to which all overlays will be connected.
+	  Common examples are "lcd" or "tv".
+
+omapdss.debug=<y|n>
+	- Enable debug printing. You have to have DSS debug support enabled in
+	  kernel config.
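+
+A hypothetical command line combining some of these options (the mode,
+sizes and display names are only examples):
+
+	vram=10M omapfb.vram=0:4M omapfb.mode=dvi:800x600MR-24@60 omapdss.def_disp=dvi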
+
+TODO
+----
+
+DSS locking
+
+Error checking
+- Lots of checks are missing or implemented just as BUG()
+
+System DMA update for DSI
+- Can be used for RGB16 and RGB24P modes. Probably not for RGB24U (how
+  to skip the empty byte?)
+
+OMAP1 support
+- Not sure if needed
+
diff --git a/Documentation/filesystems/nilfs2.txt b/Documentation/filesystems/nilfs2.txt
index 01539f4..4949fca 100644
--- a/Documentation/filesystems/nilfs2.txt
+++ b/Documentation/filesystems/nilfs2.txt
@@ -49,8 +49,7 @@
 NILFS2 supports the following mount options:
 (*) == default
 
-barrier=on(*)		This enables/disables barriers. barrier=off disables
-			it, barrier=on enables it.
+nobarrier		Disables barriers.
 errors=continue(*)	Keep going on a filesystem error.
 errors=remount-ro	Remount the filesystem read-only on an error.
 errors=panic		Panic and halt the machine if an error occurs.
@@ -71,6 +70,10 @@
 			blocks.  That means, it is guaranteed that no
 			overtaking of events occurs in the recovered file
 			system after a crash.
+norecovery		Disable recovery of the filesystem on mount.
+			This disables every write access on the device for
+			read-only mounts or snapshots.  This option will fail
+			for r/w mounts on an unclean volume.
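+			A hypothetical example of a read-only mount with
+			recovery disabled (device and mount point are
+			placeholders):
+			  # mount -t nilfs2 -o ro,norecovery /dev/sda1 /mnt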
 
 NILFS2 usage
 ============
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 777dc8a..3f886e2 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2663,6 +2663,8 @@
 			to a common usb-storage quirk flag as follows:
 				a = SANE_SENSE (collect more than 18 bytes
 					of sense data);
+				b = BAD_SENSE (don't collect more than 18
+					bytes of sense data);
 				c = FIX_CAPACITY (decrease the reported
 					device capacity by one sector);
 				h = CAPACITY_HEURISTICS (decrease the
diff --git a/Documentation/powerpc/dts-bindings/xilinx.txt b/Documentation/powerpc/dts-bindings/xilinx.txt
index 80339fe..ea68046 100644
--- a/Documentation/powerpc/dts-bindings/xilinx.txt
+++ b/Documentation/powerpc/dts-bindings/xilinx.txt
@@ -292,4 +292,15 @@
        - reg-offset : A value of 3 is required
        - reg-shift : A value of 2 is required
 
+      vii) Xilinx USB Host controller
+
+      The Xilinx USB host controller is EHCI compatible but with a different
+      base address for the EHCI registers, and it is always a big-endian
+      USB Host controller. The hardware can be configured as high speed only,
+      or high speed/full speed hybrid.
+
+      Required properties:
+      - xlnx,support-usb-fs: A value of 0 means the core is built as high
+                             speed only. A value of 1 means the core also
+                             supports full speed devices.
 
diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
index ad64261..c7c1dc2 100644
--- a/Documentation/usb/power-management.txt
+++ b/Documentation/usb/power-management.txt
@@ -2,7 +2,7 @@
 
 		 Alan Stern <stern@rowland.harvard.edu>
 
-			    October 5, 2007
+			    November 10, 2009
 
 
 
@@ -123,9 +123,9 @@
 
 	power/level
 
-		This file contains one of three words: "on", "auto",
-		or "suspend".  You can write those words to the file
-		to change the device's setting.
+		This file contains one of two words: "on" or "auto".
+		You can write those words to the file to change the
+		device's setting.
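+
+		For example (the device path is hypothetical):
+
+		# echo auto >/sys/bus/usb/devices/1-3/power/level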
 
 		"on" means that the device should be resumed and
 		autosuspend is not allowed.  (Of course, system
@@ -134,10 +134,10 @@
 		"auto" is the normal state in which the kernel is
 		allowed to autosuspend and autoresume the device.
 
-		"suspend" means that the device should remain
-		suspended, and autoresume is not allowed.  (But remote
-		wakeup may still be allowed, since it is controlled
-		separately by the power/wakeup attribute.)
+		(In kernels up to 2.6.32, you could also specify
+		"suspend", meaning that the device should remain
+		suspended and autoresume was not allowed.  This
+		setting is no longer supported.)
 
 	power/autosuspend
 
@@ -313,13 +313,14 @@
 that it supports autosuspend by setting the .supports_autosuspend flag
 in its usb_driver structure.  It is then responsible for informing the
 USB core whenever one of its interfaces becomes busy or idle.  The
-driver does so by calling these five functions:
+driver does so by calling these six functions:
 
 	int  usb_autopm_get_interface(struct usb_interface *intf);
 	void usb_autopm_put_interface(struct usb_interface *intf);
-	int  usb_autopm_set_interface(struct usb_interface *intf);
 	int  usb_autopm_get_interface_async(struct usb_interface *intf);
 	void usb_autopm_put_interface_async(struct usb_interface *intf);
+	void usb_autopm_get_interface_no_resume(struct usb_interface *intf);
+	void usb_autopm_put_interface_no_suspend(struct usb_interface *intf);
 
 The functions work by maintaining a counter in the usb_interface
 structure.  When intf->pm_usage_count is > 0 then the interface is
@@ -331,11 +332,13 @@
 associated with the device itself rather than any of its interfaces.
 This field is used only by the USB core.)
 
-The driver owns intf->pm_usage_count; it can modify the value however
-and whenever it likes.  A nice aspect of the non-async usb_autopm_*
-routines is that the changes they make are protected by the usb_device
-structure's PM mutex (udev->pm_mutex); however drivers may change
-pm_usage_count without holding the mutex.  Drivers using the async
+Drivers must not modify intf->pm_usage_count directly; its value
+should be changed only by using the functions listed above.  Drivers
+are responsible for ensuring that the overall change to pm_usage_count
+during their lifetime balances out to 0 (it may be necessary for the
+disconnect method to call usb_autopm_put_interface() one or more times
+to fulfill this requirement).  The first two routines use the PM mutex
+in struct usb_device for mutual exclusion; drivers using the async
 routines are responsible for their own synchronization and mutual
 exclusion.
 
@@ -347,11 +350,6 @@
 	attempts an autosuspend if the new value is <= 0 and the
 	device isn't suspended.
 
-	usb_autopm_set_interface() leaves pm_usage_count alone.
-	It attempts an autoresume if the value is > 0 and the device
-	is suspended, and it attempts an autosuspend if the value is
-	<= 0 and the device isn't suspended.
-
 	usb_autopm_get_interface_async() and
 	usb_autopm_put_interface_async() do almost the same things as
 	their non-async counterparts.  The differences are: they do
@@ -360,13 +358,11 @@
 	such as an URB's completion handler, but when they return the
 	device will generally not yet be in the desired state.
 
-There also are a couple of utility routines drivers can use:
-
-	usb_autopm_enable() sets pm_usage_cnt to 0 and then calls
-	usb_autopm_set_interface(), which will attempt an autosuspend.
-
-	usb_autopm_disable() sets pm_usage_cnt to 1 and then calls
-	usb_autopm_set_interface(), which will attempt an autoresume.
+	usb_autopm_get_interface_no_resume() and
+	usb_autopm_put_interface_no_suspend() merely increment or
+	decrement the pm_usage_count value; they do not attempt to
+	carry out an autoresume or an autosuspend.  Hence they can be
+	called in an atomic context.
 
 The conventional usage pattern is that a driver calls
 usb_autopm_get_interface() in its open routine and
@@ -400,11 +396,11 @@
 Normally a driver would set this flag in its probe method, at which
 time the device is guaranteed not to be autosuspended.)
 
-The usb_autopm_* routines have to run in a sleepable process context;
-they must not be called from an interrupt handler or while holding a
-spinlock.  In fact, the entire autosuspend mechanism is not well geared
-toward interrupt-driven operation.  However there is one thing a
-driver can do in an interrupt handler:
+The synchronous usb_autopm_* routines have to run in a sleepable
+process context; they must not be called from an interrupt handler or
+while holding a spinlock.  In fact, the entire autosuspend mechanism
+is not well geared toward interrupt-driven operation.  However there
+is one thing a driver can do in an interrupt handler:
 
 	usb_mark_last_busy(struct usb_device *udev);
 
@@ -423,15 +419,16 @@
 
 External suspend calls should never be allowed to fail in this way,
 only autosuspend calls.  The driver can tell them apart by checking
-udev->auto_pm; this flag will be set to 1 for internal PM events
-(autosuspend or autoresume) and 0 for external PM events.
+the PM_EVENT_AUTO bit in the message.event argument to the suspend
+method; this bit will be set for internal PM events (autosuspend) and
+clear for external PM events.
 
 Many of the ingredients in the autosuspend framework are oriented
 towards interfaces: The usb_interface structure contains the
 pm_usage_cnt field, and the usb_autopm_* routines take an interface
 pointer as their argument.  But somewhat confusingly, a few of the
-pieces (usb_mark_last_busy() and udev->auto_pm) use the usb_device
-structure instead.  Drivers need to keep this straight; they can call
+pieces (i.e., usb_mark_last_busy()) use the usb_device structure
+instead.  Drivers need to keep this straight; they can call
 interface_to_usbdev() to find the device structure for a given
 interface.
 
diff --git a/MAINTAINERS b/MAINTAINERS
index d7f8668..520a3b3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3903,6 +3903,23 @@
 S:	Maintained
 F:	drivers/video/omap/
 
+OMAP DISPLAY SUBSYSTEM SUPPORT (DSS2)
+M:	Tomi Valkeinen <tomi.valkeinen@nokia.com>
+L:	linux-omap@vger.kernel.org
+L:	linux-fbdev@vger.kernel.org (moderated for non-subscribers)
+S:	Maintained
+F:	drivers/video/omap2/dss/
+F:	drivers/video/omap2/vrfb.c
+F:	drivers/video/omap2/vram.c
+F:	Documentation/arm/OMAP/DSS
+
+OMAP FRAMEBUFFER SUPPORT (FOR DSS2)
+M:	Tomi Valkeinen <tomi.valkeinen@nokia.com>
+L:	linux-omap@vger.kernel.org
+L:	linux-fbdev@vger.kernel.org (moderated for non-subscribers)
+S:	Maintained
+F:	drivers/video/omap2/omapfb/
+
 OMAP MMC SUPPORT
 M:	Jarkko Lavinen <jarkko.lavinen@nokia.com>
 L:	linux-omap@vger.kernel.org
@@ -5663,9 +5680,11 @@
 F:	drivers/net/wireless/rndis_wlan.c
 
 USB XHCI DRIVER
-M:	Sarah Sharp <sarah.a.sharp@intel.com>
+M:	Sarah Sharp <sarah.a.sharp@linux.intel.com>
 L:	linux-usb@vger.kernel.org
 S:	Supported
+F:	drivers/usb/host/xhci*
+F:	drivers/usb/host/pci-quirks*
 
 USB ZC0301 DRIVER
 M:	Luca Risolia <luca.risolia@studio.unibo.it>
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 9a3334a..62619f2 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -178,25 +178,18 @@
 		unsigned long, prot, unsigned long, flags, unsigned long, fd,
 		unsigned long, off)
 {
-	struct file *file = NULL;
-	unsigned long ret = -EBADF;
+	unsigned long ret = -EINVAL;
 
 #if 0
 	if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED))
 		printk("%s: unimplemented OSF mmap flags %04lx\n", 
 			current->comm, flags);
 #endif
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	down_write(&current->mm->mmap_sem);
-	ret = do_mmap(file, addr, len, prot, flags, off);
-	up_write(&current->mm->mmap_sem);
-	if (file)
-		fput(file);
+	if ((off + PAGE_ALIGN(len)) < off)
+		goto out;
+	if (off & ~PAGE_MASK)
+		goto out;
+	ret = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
  out:
 	return ret;
 }
diff --git a/arch/arm/configs/omap_3430sdp_defconfig b/arch/arm/configs/omap_3430sdp_defconfig
index 8482958..592457c 100644
--- a/arch/arm/configs/omap_3430sdp_defconfig
+++ b/arch/arm/configs/omap_3430sdp_defconfig
@@ -963,10 +963,32 @@
 #
 # CONFIG_FB_S1D13XXX is not set
 # CONFIG_FB_VIRTUAL is not set
-CONFIG_FB_OMAP=y
-# CONFIG_FB_OMAP_LCDC_EXTERNAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_OMAP_LCD_VGA is not set
 # CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
-CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE=2
+CONFIG_OMAP2_VRAM=y
+CONFIG_OMAP2_VRFB=y
+CONFIG_OMAP2_DSS=y
+CONFIG_OMAP2_VRAM_SIZE=4
+CONFIG_OMAP2_DSS_DEBUG_SUPPORT=y
+# CONFIG_OMAP2_DSS_RFBI is not set
+CONFIG_OMAP2_DSS_VENC=y
+# CONFIG_OMAP2_DSS_SDI is not set
+# CONFIG_OMAP2_DSS_DSI is not set
+# CONFIG_OMAP2_DSS_FAKE_VSYNC is not set
+CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=0
+CONFIG_FB_OMAP2=y
+CONFIG_FB_OMAP2_DEBUG_SUPPORT=y
+# CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE is not set
+CONFIG_FB_OMAP2_NUM_FBS=3
+
+#
+# OMAP2/3 Display Device Drivers
+#
+CONFIG_PANEL_GENERIC=y
+CONFIG_PANEL_SHARP_LS037V7DW01=y
 # CONFIG_BACKLIGHT_LCD_SUPPORT is not set
 
 #
diff --git a/arch/arm/include/asm/mman.h b/arch/arm/include/asm/mman.h
index 8eebf89..41f99c5 100644
--- a/arch/arm/include/asm/mman.h
+++ b/arch/arm/include/asm/mman.h
@@ -1 +1,4 @@
 #include <asm-generic/mman.h>
+
+#define arch_mmap_check(addr, len, flags) \
+	(((flags) & MAP_FIXED && (addr) < FIRST_USER_ADDRESS) ? -EINVAL : 0)
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index f58c115..9314a2d 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -172,7 +172,7 @@
 /* 160 */	CALL(sys_sched_get_priority_min)
 		CALL(sys_sched_rr_get_interval)
 		CALL(sys_nanosleep)
-		CALL(sys_arm_mremap)
+		CALL(sys_mremap)
 		CALL(sys_setresuid16)
 /* 165 */	CALL(sys_getresuid16)
 		CALL(sys_ni_syscall)		/* vm86 */
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index f0fe95b..2c1db77 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -416,12 +416,12 @@
 		tst	r5, #PGOFF_MASK
 		moveq	r5, r5, lsr #PAGE_SHIFT - 12
 		streq	r5, [sp, #4]
-		beq	do_mmap2
+		beq	sys_mmap_pgoff
 		mov	r0, #-EINVAL
 		mov	pc, lr
 #else
 		str	r5, [sp, #4]
-		b	do_mmap2
+		b	sys_mmap_pgoff
 #endif
 ENDPROC(sys_mmap2)
 
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
index 78ecaac..ae4027b 100644
--- a/arch/arm/kernel/sys_arm.c
+++ b/arch/arm/kernel/sys_arm.c
@@ -28,41 +28,6 @@
 #include <linux/ipc.h>
 #include <linux/uaccess.h>
 
-extern unsigned long do_mremap(unsigned long addr, unsigned long old_len,
-			       unsigned long new_len, unsigned long flags,
-			       unsigned long new_addr);
-
-/* common code for old and new mmaps */
-inline long do_mmap2(
-	unsigned long addr, unsigned long len,
-	unsigned long prot, unsigned long flags,
-	unsigned long fd, unsigned long pgoff)
-{
-	int error = -EINVAL;
-	struct file * file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
-	if (flags & MAP_FIXED && addr < FIRST_USER_ADDRESS)
-		goto out;
-
-	error = -EBADF;
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
-out:
-	return error;
-}
-
 struct mmap_arg_struct {
 	unsigned long addr;
 	unsigned long len;
@@ -84,29 +49,11 @@
 	if (a.offset & ~PAGE_MASK)
 		goto out;
 
-	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
+	error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
 out:
 	return error;
 }
 
-asmlinkage unsigned long
-sys_arm_mremap(unsigned long addr, unsigned long old_len,
-	       unsigned long new_len, unsigned long flags,
-	       unsigned long new_addr)
-{
-	unsigned long ret = -EINVAL;
-
-	if (flags & MREMAP_FIXED && new_addr < FIRST_USER_ADDRESS)
-		goto out;
-
-	down_write(&current->mm->mmap_sem);
-	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
-	up_write(&current->mm->mmap_sem);
-
-out:
-	return ret;
-}
-
 /*
  * Perform the select(nd, in, out, ex, tv) and mmap() system
  * calls.
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index 5a275ba..71e1a3f 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -14,6 +14,7 @@
 #include <linux/platform_device.h>
 #include <linux/input.h>
 #include <linux/clk.h>
+#include <linux/omapfb.h>
 
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
@@ -32,7 +33,6 @@
 #include <plat/keypad.h>
 #include <plat/common.h>
 #include <plat/dsp_common.h>
-#include <plat/omapfb.h>
 #include <plat/hwa742.h>
 #include <plat/lcd_mipid.h>
 #include <plat/mmc.h>
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index 491364e..5bda9fd 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -37,6 +37,7 @@
 #include <plat/common.h>
 #include <plat/dma.h>
 #include <plat/gpmc.h>
+#include <plat/display.h>
 
 #include <plat/control.h>
 #include <plat/gpmc-smc91x.h>
@@ -152,31 +153,152 @@
 	},
 };
 
-static struct platform_device sdp3430_lcd_device = {
-	.name		= "sdp2430_lcd",
+
+#define SDP3430_LCD_PANEL_BACKLIGHT_GPIO	8
+#define SDP3430_LCD_PANEL_ENABLE_GPIO		5
+
+static unsigned backlight_gpio;
+static unsigned enable_gpio;
+static int lcd_enabled;
+static int dvi_enabled;
+
+static void __init sdp3430_display_init(void)
+{
+	int r;
+
+	enable_gpio    = SDP3430_LCD_PANEL_ENABLE_GPIO;
+	backlight_gpio = SDP3430_LCD_PANEL_BACKLIGHT_GPIO;
+
+	r = gpio_request(enable_gpio, "LCD reset");
+	if (r) {
+		printk(KERN_ERR "failed to get LCD reset GPIO\n");
+		goto err0;
+	}
+
+	r = gpio_request(backlight_gpio, "LCD Backlight");
+	if (r) {
+		printk(KERN_ERR "failed to get LCD backlight GPIO\n");
+		goto err1;
+	}
+
+	gpio_direction_output(enable_gpio, 0);
+	gpio_direction_output(backlight_gpio, 0);
+
+	return;
+err1:
+	gpio_free(enable_gpio);
+err0:
+	return;
+}
+
+static int sdp3430_panel_enable_lcd(struct omap_dss_device *dssdev)
+{
+	if (dvi_enabled) {
+		printk(KERN_ERR "cannot enable LCD, DVI is enabled\n");
+		return -EINVAL;
+	}
+
+	gpio_direction_output(enable_gpio, 1);
+	gpio_direction_output(backlight_gpio, 1);
+
+	lcd_enabled = 1;
+
+	return 0;
+}
+
+static void sdp3430_panel_disable_lcd(struct omap_dss_device *dssdev)
+{
+	lcd_enabled = 0;
+
+	gpio_direction_output(enable_gpio, 0);
+	gpio_direction_output(backlight_gpio, 0);
+}
+
+static int sdp3430_panel_enable_dvi(struct omap_dss_device *dssdev)
+{
+	if (lcd_enabled) {
+		printk(KERN_ERR "cannot enable DVI, LCD is enabled\n");
+		return -EINVAL;
+	}
+
+	dvi_enabled = 1;
+
+	return 0;
+}
+
+static void sdp3430_panel_disable_dvi(struct omap_dss_device *dssdev)
+{
+	dvi_enabled = 0;
+}
+
+static int sdp3430_panel_enable_tv(struct omap_dss_device *dssdev)
+{
+	return 0;
+}
+
+static void sdp3430_panel_disable_tv(struct omap_dss_device *dssdev)
+{
+}
+
+
+static struct omap_dss_device sdp3430_lcd_device = {
+	.name			= "lcd",
+	.driver_name		= "sharp_ls_panel",
+	.type			= OMAP_DISPLAY_TYPE_DPI,
+	.phy.dpi.data_lines	= 16,
+	.platform_enable	= sdp3430_panel_enable_lcd,
+	.platform_disable	= sdp3430_panel_disable_lcd,
+};
+
+static struct omap_dss_device sdp3430_dvi_device = {
+	.name			= "dvi",
+	.driver_name		= "generic_panel",
+	.type			= OMAP_DISPLAY_TYPE_DPI,
+	.phy.dpi.data_lines	= 24,
+	.platform_enable	= sdp3430_panel_enable_dvi,
+	.platform_disable	= sdp3430_panel_disable_dvi,
+};
+
+static struct omap_dss_device sdp3430_tv_device = {
+	.name			= "tv",
+	.driver_name		= "venc",
+	.type			= OMAP_DISPLAY_TYPE_VENC,
+	.phy.venc.type		= OMAP_DSS_VENC_TYPE_SVIDEO,
+	.platform_enable	= sdp3430_panel_enable_tv,
+	.platform_disable	= sdp3430_panel_disable_tv,
+};
+
+
+static struct omap_dss_device *sdp3430_dss_devices[] = {
+	&sdp3430_lcd_device,
+	&sdp3430_dvi_device,
+	&sdp3430_tv_device,
+};
+
+static struct omap_dss_board_info sdp3430_dss_data = {
+	.num_devices	= ARRAY_SIZE(sdp3430_dss_devices),
+	.devices	= sdp3430_dss_devices,
+	.default_device	= &sdp3430_lcd_device,
+};
+
+static struct platform_device sdp3430_dss_device = {
+	.name		= "omapdss",
 	.id		= -1,
+	.dev		= {
+		.platform_data = &sdp3430_dss_data,
+	},
 };
 
-static struct regulator_consumer_supply sdp3430_vdac_supply = {
-	.supply		= "vdac",
-	.dev		= &sdp3430_lcd_device.dev,
-};
-
-static struct regulator_consumer_supply sdp3430_vdvi_supply = {
-	.supply		= "vdvi",
-	.dev		= &sdp3430_lcd_device.dev,
+static struct regulator_consumer_supply sdp3430_vdda_dac_supply = {
+	.supply		= "vdda_dac",
+	.dev		= &sdp3430_dss_device.dev,
 };
 
 static struct platform_device *sdp3430_devices[] __initdata = {
-	&sdp3430_lcd_device,
-};
-
-static struct omap_lcd_config sdp3430_lcd_config __initdata = {
-	.ctrl_name	= "internal",
+	&sdp3430_dss_device,
 };
 
 static struct omap_board_config_kernel sdp3430_config[] __initdata = {
-	{ OMAP_TAG_LCD,		&sdp3430_lcd_config },
 };
 
 static void __init omap_3430sdp_init_irq(void)
@@ -392,22 +514,34 @@
 					| REGULATOR_CHANGE_STATUS,
 	},
 	.num_consumer_supplies	= 1,
-	.consumer_supplies	= &sdp3430_vdac_supply,
+	.consumer_supplies	= &sdp3430_vdda_dac_supply,
 };
 
 /* VPLL2 for digital video outputs */
+static struct regulator_consumer_supply sdp3430_vpll2_supplies[] = {
+	{
+		.supply		= "vdvi",
+		.dev		= &sdp3430_lcd_device.dev,
+	},
+	{
+		.supply		= "vdds_dsi",
+		.dev		= &sdp3430_dss_device.dev,
+	}
+};
+
 static struct regulator_init_data sdp3430_vpll2 = {
 	.constraints = {
 		.name			= "VDVI",
 		.min_uV			= 1800000,
 		.max_uV			= 1800000,
+		.apply_uV		= true,
 		.valid_modes_mask	= REGULATOR_MODE_NORMAL
 					| REGULATOR_MODE_STANDBY,
 		.valid_ops_mask		= REGULATOR_CHANGE_MODE
 					| REGULATOR_CHANGE_STATUS,
 	},
-	.num_consumer_supplies	= 1,
-	.consumer_supplies	= &sdp3430_vdvi_supply,
+	.num_consumer_supplies	= ARRAY_SIZE(sdp3430_vpll2_supplies),
+	.consumer_supplies	= sdp3430_vpll2_supplies,
 };
 
 static struct twl4030_codec_audio_data sdp3430_audio = {
@@ -521,6 +655,7 @@
 	omap_serial_init();
 	usb_musb_init();
 	board_smc91x_init();
+	sdp3430_display_init();
 	enable_board_wakeup_source();
 	usb_ehci_init(&ehci_pdata);
 }
diff --git a/arch/arm/mach-omap2/clock24xx.c b/arch/arm/mach-omap2/clock24xx.c
index e70e7e0..845b478 100644
--- a/arch/arm/mach-omap2/clock24xx.c
+++ b/arch/arm/mach-omap2/clock24xx.c
@@ -116,10 +116,10 @@
 	CLK(NULL,	"mdm_ick",	&mdm_ick,	CK_243X),
 	CLK(NULL,	"mdm_osc_ck",	&mdm_osc_ck,	CK_243X),
 	/* DSS domain clocks */
-	CLK("omapfb",	"ick",		&dss_ick,	CK_243X | CK_242X),
-	CLK("omapfb",	"dss1_fck",	&dss1_fck,	CK_243X | CK_242X),
-	CLK("omapfb",	"dss2_fck",	&dss2_fck,	CK_243X | CK_242X),
-	CLK("omapfb",	"tv_fck",	&dss_54m_fck,	CK_243X | CK_242X),
+	CLK("omapdss",	"ick",		&dss_ick,	CK_243X | CK_242X),
+	CLK("omapdss",	"dss1_fck",	&dss1_fck,	CK_243X | CK_242X),
+	CLK("omapdss",	"dss2_fck",	&dss2_fck,	CK_243X | CK_242X),
+	CLK("omapdss",	"tv_fck",	&dss_54m_fck,	CK_243X | CK_242X),
 	/* L3 domain clocks */
 	CLK(NULL,	"core_l3_ck",	&core_l3_ck,	CK_243X | CK_242X),
 	CLK(NULL,	"ssi_fck",	&ssi_ssr_sst_fck, CK_243X | CK_242X),
diff --git a/arch/arm/mach-omap2/clock34xx.c b/arch/arm/mach-omap2/clock34xx.c
index 9f2feaf..ecbb5cd 100644
--- a/arch/arm/mach-omap2/clock34xx.c
+++ b/arch/arm/mach-omap2/clock34xx.c
@@ -236,13 +236,13 @@
 	CLK("omap_rng",	"ick",		&rng_ick,	CK_343X),
 	CLK(NULL,	"sha11_ick",	&sha11_ick,	CK_343X),
 	CLK(NULL,	"des1_ick",	&des1_ick,	CK_343X),
-	CLK("omapfb",	"dss1_fck",	&dss1_alwon_fck_3430es1, CK_3430ES1),
-	CLK("omapfb",	"dss1_fck",	&dss1_alwon_fck_3430es2, CK_3430ES2),
-	CLK("omapfb",	"tv_fck",	&dss_tv_fck,	CK_343X),
-	CLK("omapfb",	"video_fck",	&dss_96m_fck,	CK_343X),
-	CLK("omapfb",	"dss2_fck",	&dss2_alwon_fck, CK_343X),
-	CLK("omapfb",	"ick",		&dss_ick_3430es1,	CK_3430ES1),
-	CLK("omapfb",	"ick",		&dss_ick_3430es2,	CK_3430ES2),
+	CLK("omapdss",	"dss1_fck",	&dss1_alwon_fck_3430es1, CK_3430ES1),
+	CLK("omapdss",	"dss1_fck",	&dss1_alwon_fck_3430es2, CK_3430ES2),
+	CLK("omapdss",	"tv_fck",	&dss_tv_fck,	CK_343X),
+	CLK("omapdss",	"video_fck",	&dss_96m_fck,	CK_343X),
+	CLK("omapdss",	"dss2_fck",	&dss2_alwon_fck, CK_343X),
+	CLK("omapdss",	"ick",		&dss_ick_3430es1,	CK_3430ES1),
+	CLK("omapdss",	"ick",		&dss_ick_3430es2,	CK_3430ES2),
 	CLK(NULL,	"cam_mclk",	&cam_mclk,	CK_343X),
 	CLK(NULL,	"cam_ick",	&cam_ick,	CK_343X),
 	CLK(NULL,	"csi2_96m_fck",	&csi2_96m_fck,	CK_343X),
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 59d28b2..6a4d8e4 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -22,17 +22,18 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/clk.h>
+#include <linux/omapfb.h>
 
 #include <asm/tlb.h>
 
 #include <asm/mach/map.h>
 
 #include <plat/mux.h>
-#include <plat/omapfb.h>
 #include <plat/sram.h>
 #include <plat/sdrc.h>
 #include <plat/gpmc.h>
 #include <plat/serial.h>
+#include <plat/vram.h>
 
 #ifndef CONFIG_ARCH_OMAP4	/* FIXME: Remove this once clkdev is ready */
 #include "clock.h"
@@ -264,6 +265,7 @@
 	omap2_check_revision();
 	omap_sram_init();
 	omapfb_reserve_sdram();
+	omap_vram_reserve_sdram();
 }
 
 /*
diff --git a/arch/arm/mach-omap2/sdrc.c b/arch/arm/mach-omap2/sdrc.c
index 9a59219..cbfbd14 100644
--- a/arch/arm/mach-omap2/sdrc.c
+++ b/arch/arm/mach-omap2/sdrc.c
@@ -160,3 +160,19 @@
 	sdrc_write_reg(l, SDRC_POWER);
 	omap2_sms_save_context();
 }
+
+void omap2_sms_write_rot_control(u32 val, unsigned ctx)
+{
+	sms_write_reg(val, SMS_ROT_CONTROL(ctx));
+}
+
+void omap2_sms_write_rot_size(u32 val, unsigned ctx)
+{
+	sms_write_reg(val, SMS_ROT_SIZE(ctx));
+}
+
+void omap2_sms_write_rot_physical_ba(u32 val, unsigned ctx)
+{
+	sms_write_reg(val, SMS_ROT_PHYSICAL_BA(ctx));
+}
+
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 2b79964..f5abc51 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -54,7 +54,8 @@
 	 * We enforce the MAP_FIXED case.
 	 */
 	if (flags & MAP_FIXED) {
-		if (aliasing && flags & MAP_SHARED && addr & (SHMLBA - 1))
+		if (aliasing && flags & MAP_SHARED &&
+		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
 			return -EINVAL;
 		return addr;
 	}
diff --git a/arch/arm/plat-mxc/Makefile b/arch/arm/plat-mxc/Makefile
index 4cbca9d..996cbac 100644
--- a/arch/arm/plat-mxc/Makefile
+++ b/arch/arm/plat-mxc/Makefile
@@ -9,6 +9,7 @@
 obj-$(CONFIG_ARCH_MX2) += iomux-mx1-mx2.o dma-mx1-mx2.o
 obj-$(CONFIG_ARCH_MXC_IOMUX_V3) += iomux-v3.o
 obj-$(CONFIG_MXC_PWM)  += pwm.o
+obj-$(CONFIG_USB_EHCI_MXC) += ehci.o
 obj-$(CONFIG_MXC_ULPI) += ulpi.o
 obj-$(CONFIG_ARCH_MXC_AUDMUX_V1) += audmux-v1.o
 obj-$(CONFIG_ARCH_MXC_AUDMUX_V2) += audmux-v2.o
diff --git a/arch/arm/plat-mxc/ehci.c b/arch/arm/plat-mxc/ehci.c
new file mode 100644
index 0000000..41599be
--- /dev/null
+++ b/arch/arm/plat-mxc/ehci.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <mach/mxc_ehci.h>
+
+#define USBCTRL_OTGBASE_OFFSET	0x600
+
+#define MX31_OTG_SIC_SHIFT	29
+#define MX31_OTG_SIC_MASK	(0xf << MX31_OTG_SIC_SHIFT)
+#define MX31_OTG_PM_BIT		(1 << 24)
+
+#define MX31_H2_SIC_SHIFT	21
+#define MX31_H2_SIC_MASK	(0xf << MX31_H2_SIC_SHIFT)
+#define MX31_H2_PM_BIT		(1 << 16)
+#define MX31_H2_DT_BIT		(1 << 5)
+
+#define MX31_H1_SIC_SHIFT	13
+#define MX31_H1_SIC_MASK	(0xf << MX31_H1_SIC_SHIFT)
+#define MX31_H1_PM_BIT		(1 << 8)
+#define MX31_H1_DT_BIT		(1 << 4)
+
+int mxc_set_usbcontrol(int port, unsigned int flags)
+{
+	unsigned int v;
+
+	if (cpu_is_mx31()) {
+		v = readl(IO_ADDRESS(MX31_OTG_BASE_ADDR +
+				     USBCTRL_OTGBASE_OFFSET));
+
+		switch (port) {
+		case 0:	/* OTG port */
+			v &= ~(MX31_OTG_SIC_MASK | MX31_OTG_PM_BIT);
+			v |= (flags & MXC_EHCI_INTERFACE_MASK)
+					<< MX31_OTG_SIC_SHIFT;
+			if (flags & MXC_EHCI_POWER_PINS_ENABLED)
+				v |= MX31_OTG_PM_BIT;
+
+			break;
+		case 1: /* H1 port */
+			v &= ~(MX31_H1_SIC_MASK | MX31_H1_PM_BIT);
+			v |= (flags & MXC_EHCI_INTERFACE_MASK)
+						<< MX31_H1_SIC_SHIFT;
+			if (flags & MXC_EHCI_POWER_PINS_ENABLED)
+				v |= MX31_H1_PM_BIT;
+
+			if (!(flags & MXC_EHCI_TTL_ENABLED))
+				v |= MX31_H1_DT_BIT;
+
+			break;
+		case 2:	/* H2 port */
+			v &= ~(MX31_H2_SIC_MASK | MX31_H2_PM_BIT);
+			v |= (flags & MXC_EHCI_INTERFACE_MASK)
+						<< MX31_H2_SIC_SHIFT;
+			if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
+				v |= MX31_H2_PM_BIT;
+
+			if (!(flags & MXC_EHCI_TTL_ENABLED))
+				v |= MX31_H2_DT_BIT;
+
+			break;
+		}
+
+		writel(v, IO_ADDRESS(MX31_OTG_BASE_ADDR +
+				     USBCTRL_OTGBASE_OFFSET));
+		return 0;
+	}
+
+	printk(KERN_WARNING
+		"%s() unable to setup USBCONTROL for this CPU\n", __func__);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(mxc_set_usbcontrol);
+
diff --git a/arch/arm/plat-mxc/include/mach/mxc_ehci.h b/arch/arm/plat-mxc/include/mach/mxc_ehci.h
new file mode 100644
index 0000000..8f79623
--- /dev/null
+++ b/arch/arm/plat-mxc/include/mach/mxc_ehci.h
@@ -0,0 +1,37 @@
+#ifndef __INCLUDE_ASM_ARCH_MXC_EHCI_H
+#define __INCLUDE_ASM_ARCH_MXC_EHCI_H
+
+/* values for portsc field */
+#define MXC_EHCI_PHY_LOW_POWER_SUSPEND	(1 << 23)
+#define MXC_EHCI_FORCE_FS		(1 << 24)
+#define MXC_EHCI_UTMI_8BIT		(0 << 28)
+#define MXC_EHCI_UTMI_16BIT		(1 << 28)
+#define MXC_EHCI_SERIAL			(1 << 29)
+#define MXC_EHCI_MODE_UTMI		(0 << 30)
+#define MXC_EHCI_MODE_PHILIPS		(1 << 30)
+#define MXC_EHCI_MODE_ULPI		(2 << 30)
+#define MXC_EHCI_MODE_SERIAL		(3 << 30)
+
+/* values for flags field */
+#define MXC_EHCI_INTERFACE_DIFF_UNI	(0 << 0)
+#define MXC_EHCI_INTERFACE_DIFF_BI	(1 << 0)
+#define MXC_EHCI_INTERFACE_SINGLE_UNI	(2 << 0)
+#define MXC_EHCI_INTERFACE_SINGLE_BI	(3 << 0)
+#define MXC_EHCI_INTERFACE_MASK		(0xf)
+
+#define MXC_EHCI_POWER_PINS_ENABLED	(1 << 5)
+#define MXC_EHCI_TTL_ENABLED		(1 << 6)
+
+struct mxc_usbh_platform_data {
+	int (*init)(struct platform_device *pdev);
+	int (*exit)(struct platform_device *pdev);
+
+	unsigned int		 portsc;
+	unsigned int		 flags;
+	struct otg_transceiver	*otg;
+};
+
+int mxc_set_usbcontrol(int port, unsigned int flags);
+
+#endif /* __INCLUDE_ASM_ARCH_MXC_EHCI_H */
+
diff --git a/arch/arm/plat-omap/fb.c b/arch/arm/plat-omap/fb.c
index 78a4ce5..d3eea4f 100644
--- a/arch/arm/plat-omap/fb.c
+++ b/arch/arm/plat-omap/fb.c
@@ -28,13 +28,13 @@
 #include <linux/platform_device.h>
 #include <linux/bootmem.h>
 #include <linux/io.h>
+#include <linux/omapfb.h>
 
 #include <mach/hardware.h>
 #include <asm/mach/map.h>
 
 #include <plat/board.h>
 #include <plat/sram.h>
-#include <plat/omapfb.h>
 
 #if defined(CONFIG_FB_OMAP) || defined(CONFIG_FB_OMAP_MODULE)
 
@@ -55,6 +55,10 @@
 	.num_resources = 0,
 };
 
+void omapfb_set_platform_data(struct omapfb_platform_data *data)
+{
+}
+
 static inline int ranges_overlap(unsigned long start1, unsigned long size1,
 				 unsigned long start2, unsigned long size2)
 {
@@ -327,7 +331,33 @@
 
 arch_initcall(omap_init_fb);
 
-#else
+#elif defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
+
+static u64 omap_fb_dma_mask = ~(u32)0;
+static struct omapfb_platform_data omapfb_config;
+
+static struct platform_device omap_fb_device = {
+	.name		= "omapfb",
+	.id		= -1,
+	.dev = {
+		.dma_mask		= &omap_fb_dma_mask,
+		.coherent_dma_mask	= ~(u32)0,
+		.platform_data		= &omapfb_config,
+	},
+	.num_resources = 0,
+};
+
+void omapfb_set_platform_data(struct omapfb_platform_data *data)
+{
+	omapfb_config = *data;
+}
+
+static inline int omap_init_fb(void)
+{
+	return platform_device_register(&omap_fb_device);
+}
+
+arch_initcall(omap_init_fb);
 
 void omapfb_reserve_sdram(void) {}
 unsigned long omapfb_reserve_sram(unsigned long sram_pstart,
@@ -339,5 +369,20 @@
 	return 0;
 }
 
+#else
+
+void omapfb_set_platform_data(struct omapfb_platform_data *data)
+{
+}
+
+void omapfb_reserve_sdram(void) {}
+unsigned long omapfb_reserve_sram(unsigned long sram_pstart,
+				  unsigned long sram_vstart,
+				  unsigned long sram_size,
+				  unsigned long start_avail,
+				  unsigned long size_avail)
+{
+	return 0;
+}
 
 #endif
diff --git a/arch/arm/plat-omap/include/plat/display.h b/arch/arm/plat-omap/include/plat/display.h
new file mode 100644
index 0000000..c66e464
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/display.h
@@ -0,0 +1,575 @@
+/*
+ * linux/include/asm-arm/arch-omap/display.h
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_ARCH_OMAP_DISPLAY_H
+#define __ASM_ARCH_OMAP_DISPLAY_H
+
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <asm/atomic.h>
+
+#define DISPC_IRQ_FRAMEDONE		(1 << 0)
+#define DISPC_IRQ_VSYNC			(1 << 1)
+#define DISPC_IRQ_EVSYNC_EVEN		(1 << 2)
+#define DISPC_IRQ_EVSYNC_ODD		(1 << 3)
+#define DISPC_IRQ_ACBIAS_COUNT_STAT	(1 << 4)
+#define DISPC_IRQ_PROG_LINE_NUM		(1 << 5)
+#define DISPC_IRQ_GFX_FIFO_UNDERFLOW	(1 << 6)
+#define DISPC_IRQ_GFX_END_WIN		(1 << 7)
+#define DISPC_IRQ_PAL_GAMMA_MASK	(1 << 8)
+#define DISPC_IRQ_OCP_ERR		(1 << 9)
+#define DISPC_IRQ_VID1_FIFO_UNDERFLOW	(1 << 10)
+#define DISPC_IRQ_VID1_END_WIN		(1 << 11)
+#define DISPC_IRQ_VID2_FIFO_UNDERFLOW	(1 << 12)
+#define DISPC_IRQ_VID2_END_WIN		(1 << 13)
+#define DISPC_IRQ_SYNC_LOST		(1 << 14)
+#define DISPC_IRQ_SYNC_LOST_DIGIT	(1 << 15)
+#define DISPC_IRQ_WAKEUP		(1 << 16)
+
+struct omap_dss_device;
+struct omap_overlay_manager;
+
+enum omap_display_type {
+	OMAP_DISPLAY_TYPE_NONE		= 0,
+	OMAP_DISPLAY_TYPE_DPI		= 1 << 0,
+	OMAP_DISPLAY_TYPE_DBI		= 1 << 1,
+	OMAP_DISPLAY_TYPE_SDI		= 1 << 2,
+	OMAP_DISPLAY_TYPE_DSI		= 1 << 3,
+	OMAP_DISPLAY_TYPE_VENC		= 1 << 4,
+};
+
+enum omap_plane {
+	OMAP_DSS_GFX	= 0,
+	OMAP_DSS_VIDEO1	= 1,
+	OMAP_DSS_VIDEO2	= 2
+};
+
+enum omap_channel {
+	OMAP_DSS_CHANNEL_LCD	= 0,
+	OMAP_DSS_CHANNEL_DIGIT	= 1,
+};
+
+enum omap_color_mode {
+	OMAP_DSS_COLOR_CLUT1	= 1 << 0,  /* BITMAP 1 */
+	OMAP_DSS_COLOR_CLUT2	= 1 << 1,  /* BITMAP 2 */
+	OMAP_DSS_COLOR_CLUT4	= 1 << 2,  /* BITMAP 4 */
+	OMAP_DSS_COLOR_CLUT8	= 1 << 3,  /* BITMAP 8 */
+	OMAP_DSS_COLOR_RGB12U	= 1 << 4,  /* RGB12, 16-bit container */
+	OMAP_DSS_COLOR_ARGB16	= 1 << 5,  /* ARGB16 */
+	OMAP_DSS_COLOR_RGB16	= 1 << 6,  /* RGB16 */
+	OMAP_DSS_COLOR_RGB24U	= 1 << 7,  /* RGB24, 32-bit container */
+	OMAP_DSS_COLOR_RGB24P	= 1 << 8,  /* RGB24, 24-bit container */
+	OMAP_DSS_COLOR_YUV2	= 1 << 9,  /* YUV2 4:2:2 co-sited */
+	OMAP_DSS_COLOR_UYVY	= 1 << 10, /* UYVY 4:2:2 co-sited */
+	OMAP_DSS_COLOR_ARGB32	= 1 << 11, /* ARGB32 */
+	OMAP_DSS_COLOR_RGBA32	= 1 << 12, /* RGBA32 */
+	OMAP_DSS_COLOR_RGBX32	= 1 << 13, /* RGBx32 */
+
+	OMAP_DSS_COLOR_GFX_OMAP2 =
+		OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 |
+		OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 |
+		OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_RGB16 |
+		OMAP_DSS_COLOR_RGB24U | OMAP_DSS_COLOR_RGB24P,
+
+	OMAP_DSS_COLOR_VID_OMAP2 =
+		OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
+		OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 |
+		OMAP_DSS_COLOR_UYVY,
+
+	OMAP_DSS_COLOR_GFX_OMAP3 =
+		OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 |
+		OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 |
+		OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 |
+		OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
+		OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_ARGB32 |
+		OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32,
+
+	OMAP_DSS_COLOR_VID1_OMAP3 =
+		OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_RGB16 |
+		OMAP_DSS_COLOR_RGB24U | OMAP_DSS_COLOR_RGB24P |
+		OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_UYVY,
+
+	OMAP_DSS_COLOR_VID2_OMAP3 =
+		OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 |
+		OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
+		OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 |
+		OMAP_DSS_COLOR_UYVY | OMAP_DSS_COLOR_ARGB32 |
+		OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32,
+};
+
+enum omap_lcd_display_type {
+	OMAP_DSS_LCD_DISPLAY_STN,
+	OMAP_DSS_LCD_DISPLAY_TFT,
+};
+
+enum omap_dss_load_mode {
+	OMAP_DSS_LOAD_CLUT_AND_FRAME	= 0,
+	OMAP_DSS_LOAD_CLUT_ONLY		= 1,
+	OMAP_DSS_LOAD_FRAME_ONLY	= 2,
+	OMAP_DSS_LOAD_CLUT_ONCE_FRAME	= 3,
+};
+
+enum omap_dss_trans_key_type {
+	OMAP_DSS_COLOR_KEY_GFX_DST = 0,
+	OMAP_DSS_COLOR_KEY_VID_SRC = 1,
+};
+
+enum omap_rfbi_te_mode {
+	OMAP_DSS_RFBI_TE_MODE_1 = 1,
+	OMAP_DSS_RFBI_TE_MODE_2 = 2,
+};
+
+enum omap_panel_config {
+	OMAP_DSS_LCD_IVS		= 1<<0,
+	OMAP_DSS_LCD_IHS		= 1<<1,
+	OMAP_DSS_LCD_IPC		= 1<<2,
+	OMAP_DSS_LCD_IEO		= 1<<3,
+	OMAP_DSS_LCD_RF			= 1<<4,
+	OMAP_DSS_LCD_ONOFF		= 1<<5,
+
+	OMAP_DSS_LCD_TFT		= 1<<20,
+};
+
+enum omap_dss_venc_type {
+	OMAP_DSS_VENC_TYPE_COMPOSITE,
+	OMAP_DSS_VENC_TYPE_SVIDEO,
+};
+
+enum omap_display_caps {
+	OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE	= 1 << 0,
+	OMAP_DSS_DISPLAY_CAP_TEAR_ELIM		= 1 << 1,
+};
+
+enum omap_dss_update_mode {
+	OMAP_DSS_UPDATE_DISABLED = 0,
+	OMAP_DSS_UPDATE_AUTO,
+	OMAP_DSS_UPDATE_MANUAL,
+};
+
+enum omap_dss_display_state {
+	OMAP_DSS_DISPLAY_DISABLED = 0,
+	OMAP_DSS_DISPLAY_ACTIVE,
+	OMAP_DSS_DISPLAY_SUSPENDED,
+};
+
+/* XXX perhaps this should be removed */
+enum omap_dss_overlay_managers {
+	OMAP_DSS_OVL_MGR_LCD,
+	OMAP_DSS_OVL_MGR_TV,
+};
+
+enum omap_dss_rotation_type {
+	OMAP_DSS_ROT_DMA = 0,
+	OMAP_DSS_ROT_VRFB = 1,
+};
+
+/* clockwise rotation angle */
+enum omap_dss_rotation_angle {
+	OMAP_DSS_ROT_0   = 0,
+	OMAP_DSS_ROT_90  = 1,
+	OMAP_DSS_ROT_180 = 2,
+	OMAP_DSS_ROT_270 = 3,
+};
+
+enum omap_overlay_caps {
+	OMAP_DSS_OVL_CAP_SCALE = 1 << 0,
+	OMAP_DSS_OVL_CAP_DISPC = 1 << 1,
+};
+
+enum omap_overlay_manager_caps {
+	OMAP_DSS_OVL_MGR_CAP_DISPC = 1 << 0,
+};
+
+/* RFBI */
+
+struct rfbi_timings {
+	int cs_on_time;
+	int cs_off_time;
+	int we_on_time;
+	int we_off_time;
+	int re_on_time;
+	int re_off_time;
+	int we_cycle_time;
+	int re_cycle_time;
+	int cs_pulse_width;
+	int access_time;
+
+	int clk_div;
+
+	u32 tim[5];             /* set by rfbi_convert_timings() */
+
+	int converted;
+};
+
+void omap_rfbi_write_command(const void *buf, u32 len);
+void omap_rfbi_read_data(void *buf, u32 len);
+void omap_rfbi_write_data(const void *buf, u32 len);
+void omap_rfbi_write_pixels(const void __iomem *buf, int scr_width,
+		u16 x, u16 y,
+		u16 w, u16 h);
+int omap_rfbi_enable_te(bool enable, unsigned line);
+int omap_rfbi_setup_te(enum omap_rfbi_te_mode mode,
+			     unsigned hs_pulse_time, unsigned vs_pulse_time,
+			     int hs_pol_inv, int vs_pol_inv, int extif_div);
+
+/* DSI */
+void dsi_bus_lock(void);
+void dsi_bus_unlock(void);
+int dsi_vc_dcs_write(int channel, u8 *data, int len);
+int dsi_vc_dcs_write_nosync(int channel, u8 *data, int len);
+int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen);
+int dsi_vc_set_max_rx_packet_size(int channel, u16 len);
+int dsi_vc_send_null(int channel);
+int dsi_vc_send_bta_sync(int channel);
+
+/* Board specific data */
+struct omap_dss_board_info {
+	int (*get_last_off_on_transaction_id)(struct device *dev);
+	int num_devices;
+	struct omap_dss_device **devices;
+	struct omap_dss_device *default_device;
+};
+
+struct omap_video_timings {
+	/* Unit: pixels */
+	u16 x_res;
+	/* Unit: pixels */
+	u16 y_res;
+	/* Unit: KHz */
+	u32 pixel_clock;
+	/* Unit: pixel clocks */
+	u16 hsw;	/* Horizontal synchronization pulse width */
+	/* Unit: pixel clocks */
+	u16 hfp;	/* Horizontal front porch */
+	/* Unit: pixel clocks */
+	u16 hbp;	/* Horizontal back porch */
+	/* Unit: line clocks */
+	u16 vsw;	/* Vertical synchronization pulse width */
+	/* Unit: line clocks */
+	u16 vfp;	/* Vertical front porch */
+	/* Unit: line clocks */
+	u16 vbp;	/* Vertical back porch */
+};
+
+#ifdef CONFIG_OMAP2_DSS_VENC
+/* Hardcoded timings for tv modes. Venc only uses these to
+ * identify the mode, and does not actually use the configs
+ * itself. However, the configs should be something that
+ * a normal monitor can also show */
+const extern struct omap_video_timings omap_dss_pal_timings;
+const extern struct omap_video_timings omap_dss_ntsc_timings;
+#endif
+
+struct omap_overlay_info {
+	bool enabled;
+
+	u32 paddr;
+	void __iomem *vaddr;
+	u16 screen_width;
+	u16 width;
+	u16 height;
+	enum omap_color_mode color_mode;
+	u8 rotation;
+	enum omap_dss_rotation_type rotation_type;
+	bool mirror;
+
+	u16 pos_x;
+	u16 pos_y;
+	u16 out_width;	/* if 0, out_width == width */
+	u16 out_height;	/* if 0, out_height == height */
+	u8 global_alpha;
+};
+
+struct omap_overlay {
+	struct kobject kobj;
+	struct list_head list;
+
+	/* static fields */
+	const char *name;
+	int id;
+	enum omap_color_mode supported_modes;
+	enum omap_overlay_caps caps;
+
+	/* dynamic fields */
+	struct omap_overlay_manager *manager;
+	struct omap_overlay_info info;
+
+	/* if true, info has been changed, but not applied() yet */
+	bool info_dirty;
+
+	int (*set_manager)(struct omap_overlay *ovl,
+		struct omap_overlay_manager *mgr);
+	int (*unset_manager)(struct omap_overlay *ovl);
+
+	int (*set_overlay_info)(struct omap_overlay *ovl,
+			struct omap_overlay_info *info);
+	void (*get_overlay_info)(struct omap_overlay *ovl,
+			struct omap_overlay_info *info);
+
+	int (*wait_for_go)(struct omap_overlay *ovl);
+};
+
+struct omap_overlay_manager_info {
+	u32 default_color;
+
+	enum omap_dss_trans_key_type trans_key_type;
+	u32 trans_key;
+	bool trans_enabled;
+
+	bool alpha_enabled;
+};
+
+struct omap_overlay_manager {
+	struct kobject kobj;
+	struct list_head list;
+
+	/* static fields */
+	const char *name;
+	int id;
+	enum omap_overlay_manager_caps caps;
+	int num_overlays;
+	struct omap_overlay **overlays;
+	enum omap_display_type supported_displays;
+
+	/* dynamic fields */
+	struct omap_dss_device *device;
+	struct omap_overlay_manager_info info;
+
+	bool device_changed;
+	/* if true, info has been changed but not applied() yet */
+	bool info_dirty;
+
+	int (*set_device)(struct omap_overlay_manager *mgr,
+		struct omap_dss_device *dssdev);
+	int (*unset_device)(struct omap_overlay_manager *mgr);
+
+	int (*set_manager_info)(struct omap_overlay_manager *mgr,
+			struct omap_overlay_manager_info *info);
+	void (*get_manager_info)(struct omap_overlay_manager *mgr,
+			struct omap_overlay_manager_info *info);
+
+	int (*apply)(struct omap_overlay_manager *mgr);
+	int (*wait_for_go)(struct omap_overlay_manager *mgr);
+};
+
+struct omap_dss_device {
+	struct device dev;
+
+	enum omap_display_type type;
+
+	union {
+		struct {
+			u8 data_lines;
+		} dpi;
+
+		struct {
+			u8 channel;
+			u8 data_lines;
+		} rfbi;
+
+		struct {
+			u8 datapairs;
+		} sdi;
+
+		struct {
+			u8 clk_lane;
+			u8 clk_pol;
+			u8 data1_lane;
+			u8 data1_pol;
+			u8 data2_lane;
+			u8 data2_pol;
+
+			struct {
+				u16 regn;
+				u16 regm;
+				u16 regm3;
+				u16 regm4;
+
+				u16 lp_clk_div;
+
+				u16 lck_div;
+				u16 pck_div;
+			} div;
+
+			bool ext_te;
+			u8 ext_te_gpio;
+		} dsi;
+
+		struct {
+			enum omap_dss_venc_type type;
+			bool invert_polarity;
+		} venc;
+	} phy;
+
+	struct {
+		struct omap_video_timings timings;
+
+		int acbi;	/* ac-bias pin transitions per interrupt */
+		/* Unit: line clocks */
+		int acb;	/* ac-bias pin frequency */
+
+		enum omap_panel_config config;
+
+		u8 recommended_bpp;
+
+		struct omap_dss_device *ctrl;
+	} panel;
+
+	struct {
+		u8 pixel_size;
+		struct rfbi_timings rfbi_timings;
+		struct omap_dss_device *panel;
+	} ctrl;
+
+	int reset_gpio;
+
+	int max_backlight_level;
+
+	const char *name;
+
+	/* used to match device to driver */
+	const char *driver_name;
+
+	void *data;
+
+	struct omap_dss_driver *driver;
+
+	/* helper variable for driver suspend/resume */
+	bool activate_after_resume;
+
+	enum omap_display_caps caps;
+
+	struct omap_overlay_manager *manager;
+
+	enum omap_dss_display_state state;
+
+	int (*enable)(struct omap_dss_device *dssdev);
+	void (*disable)(struct omap_dss_device *dssdev);
+
+	int (*suspend)(struct omap_dss_device *dssdev);
+	int (*resume)(struct omap_dss_device *dssdev);
+
+	void (*get_resolution)(struct omap_dss_device *dssdev,
+			u16 *xres, u16 *yres);
+	int (*get_recommended_bpp)(struct omap_dss_device *dssdev);
+
+	int (*check_timings)(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings);
+	void (*set_timings)(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings);
+	void (*get_timings)(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings);
+	int (*update)(struct omap_dss_device *dssdev,
+			       u16 x, u16 y, u16 w, u16 h);
+	int (*sync)(struct omap_dss_device *dssdev);
+	int (*wait_vsync)(struct omap_dss_device *dssdev);
+
+	int (*set_update_mode)(struct omap_dss_device *dssdev,
+			enum omap_dss_update_mode);
+	enum omap_dss_update_mode (*get_update_mode)
+		(struct omap_dss_device *dssdev);
+
+	int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
+	int (*get_te)(struct omap_dss_device *dssdev);
+
+	u8 (*get_rotate)(struct omap_dss_device *dssdev);
+	int (*set_rotate)(struct omap_dss_device *dssdev, u8 rotate);
+
+	bool (*get_mirror)(struct omap_dss_device *dssdev);
+	int (*set_mirror)(struct omap_dss_device *dssdev, bool enable);
+
+	int (*run_test)(struct omap_dss_device *dssdev, int test);
+	int (*memory_read)(struct omap_dss_device *dssdev,
+			void *buf, size_t size,
+			u16 x, u16 y, u16 w, u16 h);
+
+	int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
+	u32 (*get_wss)(struct omap_dss_device *dssdev);
+
+	/* platform specific */
+	int (*platform_enable)(struct omap_dss_device *dssdev);
+	void (*platform_disable)(struct omap_dss_device *dssdev);
+	int (*set_backlight)(struct omap_dss_device *dssdev, int level);
+	int (*get_backlight)(struct omap_dss_device *dssdev);
+};
+
+struct omap_dss_driver {
+	struct device_driver driver;
+
+	int (*probe)(struct omap_dss_device *);
+	void (*remove)(struct omap_dss_device *);
+
+	int (*enable)(struct omap_dss_device *display);
+	void (*disable)(struct omap_dss_device *display);
+	int (*suspend)(struct omap_dss_device *display);
+	int (*resume)(struct omap_dss_device *display);
+	int (*run_test)(struct omap_dss_device *display, int test);
+
+	void (*setup_update)(struct omap_dss_device *dssdev,
+			u16 x, u16 y, u16 w, u16 h);
+
+	int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
+	int (*wait_for_te)(struct omap_dss_device *dssdev);
+
+	u8 (*get_rotate)(struct omap_dss_device *dssdev);
+	int (*set_rotate)(struct omap_dss_device *dssdev, u8 rotate);
+
+	bool (*get_mirror)(struct omap_dss_device *dssdev);
+	int (*set_mirror)(struct omap_dss_device *dssdev, bool enable);
+
+	int (*memory_read)(struct omap_dss_device *dssdev,
+			void *buf, size_t size,
+			u16 x, u16 y, u16 w, u16 h);
+};
+
+int omap_dss_register_driver(struct omap_dss_driver *);
+void omap_dss_unregister_driver(struct omap_dss_driver *);
+
+int omap_dss_register_device(struct omap_dss_device *);
+void omap_dss_unregister_device(struct omap_dss_device *);
+
+void omap_dss_get_device(struct omap_dss_device *dssdev);
+void omap_dss_put_device(struct omap_dss_device *dssdev);
+#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL)
+struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from);
+struct omap_dss_device *omap_dss_find_device(void *data,
+		int (*match)(struct omap_dss_device *dssdev, void *data));
+
+int omap_dss_start_device(struct omap_dss_device *dssdev);
+void omap_dss_stop_device(struct omap_dss_device *dssdev);
+
+int omap_dss_get_num_overlay_managers(void);
+struct omap_overlay_manager *omap_dss_get_overlay_manager(int num);
+
+int omap_dss_get_num_overlays(void);
+struct omap_overlay *omap_dss_get_overlay(int num);
+
+typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
+int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
+int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
+
+int omap_dispc_wait_for_irq_timeout(u32 irqmask, unsigned long timeout);
+int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
+		unsigned long timeout);
+
+#define to_dss_driver(x) container_of((x), struct omap_dss_driver, driver)
+#define to_dss_device(x) container_of((x), struct omap_dss_device, dev)
+
+#endif
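
To make the new interface concrete, here is a minimal sketch of a panel
driver registering against the API declared above. The "acme" names, the
empty bodies, and the include path are placeholders for illustration, not
anything this patch adds:

	#include <linux/module.h>
	#include <plat/display.h>	/* assumed path of the header added above */

	static int acme_panel_probe(struct omap_dss_device *dssdev)
	{
		/* a real driver would fill in dssdev->panel.timings etc. here */
		return 0;
	}

	static void acme_panel_remove(struct omap_dss_device *dssdev)
	{
	}

	static struct omap_dss_driver acme_panel_driver = {
		.probe	= acme_panel_probe,
		.remove	= acme_panel_remove,
		.driver	= {
			/* matched against dssdev->driver_name */
			.name	= "acme_panel",
			.owner	= THIS_MODULE,
		},
	};

	static int __init acme_panel_init(void)
	{
		return omap_dss_register_driver(&acme_panel_driver);
	}

	static void __exit acme_panel_exit(void)
	{
		omap_dss_unregister_driver(&acme_panel_driver);
	}

	module_init(acme_panel_init);
	module_exit(acme_panel_exit);
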
diff --git a/arch/arm/plat-omap/include/plat/omapfb.h b/arch/arm/plat-omap/include/plat/omapfb.h
deleted file mode 100644
index bfef7ab..0000000
--- a/arch/arm/plat-omap/include/plat/omapfb.h
+++ /dev/null
@@ -1,398 +0,0 @@
-/*
- * File: arch/arm/plat-omap/include/mach/omapfb.h
- *
- * Framebuffer driver for TI OMAP boards
- *
- * Copyright (C) 2004 Nokia Corporation
- * Author: Imre Deak <imre.deak@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- */
-
-#ifndef __OMAPFB_H
-#define __OMAPFB_H
-
-#include <asm/ioctl.h>
-#include <asm/types.h>
-
-/* IOCTL commands. */
-
-#define OMAP_IOW(num, dtype)	_IOW('O', num, dtype)
-#define OMAP_IOR(num, dtype)	_IOR('O', num, dtype)
-#define OMAP_IOWR(num, dtype)	_IOWR('O', num, dtype)
-#define OMAP_IO(num)		_IO('O', num)
-
-#define OMAPFB_MIRROR		OMAP_IOW(31, int)
-#define OMAPFB_SYNC_GFX		OMAP_IO(37)
-#define OMAPFB_VSYNC		OMAP_IO(38)
-#define OMAPFB_SET_UPDATE_MODE	OMAP_IOW(40, int)
-#define OMAPFB_GET_CAPS		OMAP_IOR(42, struct omapfb_caps)
-#define OMAPFB_GET_UPDATE_MODE	OMAP_IOW(43, int)
-#define OMAPFB_LCD_TEST		OMAP_IOW(45, int)
-#define OMAPFB_CTRL_TEST	OMAP_IOW(46, int)
-#define OMAPFB_UPDATE_WINDOW_OLD OMAP_IOW(47, struct omapfb_update_window_old)
-#define OMAPFB_SET_COLOR_KEY	OMAP_IOW(50, struct omapfb_color_key)
-#define OMAPFB_GET_COLOR_KEY	OMAP_IOW(51, struct omapfb_color_key)
-#define OMAPFB_SETUP_PLANE	OMAP_IOW(52, struct omapfb_plane_info)
-#define OMAPFB_QUERY_PLANE	OMAP_IOW(53, struct omapfb_plane_info)
-#define OMAPFB_UPDATE_WINDOW	OMAP_IOW(54, struct omapfb_update_window)
-#define OMAPFB_SETUP_MEM	OMAP_IOW(55, struct omapfb_mem_info)
-#define OMAPFB_QUERY_MEM	OMAP_IOW(56, struct omapfb_mem_info)
-
-#define OMAPFB_CAPS_GENERIC_MASK	0x00000fff
-#define OMAPFB_CAPS_LCDC_MASK		0x00fff000
-#define OMAPFB_CAPS_PANEL_MASK		0xff000000
-
-#define OMAPFB_CAPS_MANUAL_UPDATE	0x00001000
-#define OMAPFB_CAPS_TEARSYNC		0x00002000
-#define OMAPFB_CAPS_PLANE_RELOCATE_MEM	0x00004000
-#define OMAPFB_CAPS_PLANE_SCALE		0x00008000
-#define OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE	0x00010000
-#define OMAPFB_CAPS_WINDOW_SCALE	0x00020000
-#define OMAPFB_CAPS_WINDOW_OVERLAY	0x00040000
-#define OMAPFB_CAPS_WINDOW_ROTATE	0x00080000
-#define OMAPFB_CAPS_SET_BACKLIGHT	0x01000000
-
-/* Values from DSP must map to lower 16-bits */
-#define OMAPFB_FORMAT_MASK		0x00ff
-#define OMAPFB_FORMAT_FLAG_DOUBLE	0x0100
-#define OMAPFB_FORMAT_FLAG_TEARSYNC	0x0200
-#define OMAPFB_FORMAT_FLAG_FORCE_VSYNC	0x0400
-#define OMAPFB_FORMAT_FLAG_ENABLE_OVERLAY	0x0800
-#define OMAPFB_FORMAT_FLAG_DISABLE_OVERLAY	0x1000
-
-#define OMAPFB_EVENT_READY	1
-#define OMAPFB_EVENT_DISABLED	2
-
-#define OMAPFB_MEMTYPE_SDRAM		0
-#define OMAPFB_MEMTYPE_SRAM		1
-#define OMAPFB_MEMTYPE_MAX		1
-
-enum omapfb_color_format {
-	OMAPFB_COLOR_RGB565 = 0,
-	OMAPFB_COLOR_YUV422,
-	OMAPFB_COLOR_YUV420,
-	OMAPFB_COLOR_CLUT_8BPP,
-	OMAPFB_COLOR_CLUT_4BPP,
-	OMAPFB_COLOR_CLUT_2BPP,
-	OMAPFB_COLOR_CLUT_1BPP,
-	OMAPFB_COLOR_RGB444,
-	OMAPFB_COLOR_YUY422,
-};
-
-struct omapfb_update_window {
-	__u32 x, y;
-	__u32 width, height;
-	__u32 format;
-	__u32 out_x, out_y;
-	__u32 out_width, out_height;
-	__u32 reserved[8];
-};
-
-struct omapfb_update_window_old {
-	__u32 x, y;
-	__u32 width, height;
-	__u32 format;
-};
-
-enum omapfb_plane {
-	OMAPFB_PLANE_GFX = 0,
-	OMAPFB_PLANE_VID1,
-	OMAPFB_PLANE_VID2,
-};
-
-enum omapfb_channel_out {
-	OMAPFB_CHANNEL_OUT_LCD = 0,
-	OMAPFB_CHANNEL_OUT_DIGIT,
-};
-
-struct omapfb_plane_info {
-	__u32 pos_x;
-	__u32 pos_y;
-	__u8  enabled;
-	__u8  channel_out;
-	__u8  mirror;
-	__u8  reserved1;
-	__u32 out_width;
-	__u32 out_height;
-	__u32 reserved2[12];
-};
-
-struct omapfb_mem_info {
-	__u32 size;
-	__u8  type;
-	__u8  reserved[3];
-};
-
-struct omapfb_caps {
-	__u32 ctrl;
-	__u32 plane_color;
-	__u32 wnd_color;
-};
-
-enum omapfb_color_key_type {
-	OMAPFB_COLOR_KEY_DISABLED = 0,
-	OMAPFB_COLOR_KEY_GFX_DST,
-	OMAPFB_COLOR_KEY_VID_SRC,
-};
-
-struct omapfb_color_key {
-	__u8  channel_out;
-	__u32 background;
-	__u32 trans_key;
-	__u8  key_type;
-};
-
-enum omapfb_update_mode {
-	OMAPFB_UPDATE_DISABLED = 0,
-	OMAPFB_AUTO_UPDATE,
-	OMAPFB_MANUAL_UPDATE
-};
-
-#ifdef __KERNEL__
-
-#include <linux/completion.h>
-#include <linux/interrupt.h>
-#include <linux/fb.h>
-#include <linux/mutex.h>
-
-#include <plat/board.h>
-
-#define OMAP_LCDC_INV_VSYNC             0x0001
-#define OMAP_LCDC_INV_HSYNC             0x0002
-#define OMAP_LCDC_INV_PIX_CLOCK         0x0004
-#define OMAP_LCDC_INV_OUTPUT_EN         0x0008
-#define OMAP_LCDC_HSVS_RISING_EDGE      0x0010
-#define OMAP_LCDC_HSVS_OPPOSITE         0x0020
-
-#define OMAP_LCDC_SIGNAL_MASK		0x003f
-
-#define OMAP_LCDC_PANEL_TFT		0x0100
-
-#define OMAPFB_PLANE_XRES_MIN		8
-#define OMAPFB_PLANE_YRES_MIN		8
-
-#ifdef CONFIG_ARCH_OMAP1
-#define OMAPFB_PLANE_NUM		1
-#else
-#define OMAPFB_PLANE_NUM		3
-#endif
-
-struct omapfb_device;
-
-struct lcd_panel {
-	const char	*name;
-	int		config;		/* TFT/STN, signal inversion */
-	int		bpp;		/* Pixel format in fb mem */
-	int		data_lines;	/* Lines on LCD HW interface */
-
-	int		x_res, y_res;
-	int		pixel_clock;	/* In kHz */
-	int		hsw;		/* Horizontal synchronization
-					   pulse width */
-	int		hfp;		/* Horizontal front porch */
-	int		hbp;		/* Horizontal back porch */
-	int		vsw;		/* Vertical synchronization
-					   pulse width */
-	int		vfp;		/* Vertical front porch */
-	int		vbp;		/* Vertical back porch */
-	int		acb;		/* ac-bias pin frequency */
-	int		pcd;		/* pixel clock divider.
-					   Obsolete use pixel_clock instead */
-
-	int		(*init)		(struct lcd_panel *panel,
-					 struct omapfb_device *fbdev);
-	void		(*cleanup)	(struct lcd_panel *panel);
-	int		(*enable)	(struct lcd_panel *panel);
-	void		(*disable)	(struct lcd_panel *panel);
-	unsigned long	(*get_caps)	(struct lcd_panel *panel);
-	int		(*set_bklight_level)(struct lcd_panel *panel,
-					     unsigned int level);
-	unsigned int	(*get_bklight_level)(struct lcd_panel *panel);
-	unsigned int	(*get_bklight_max)  (struct lcd_panel *panel);
-	int		(*run_test)	(struct lcd_panel *panel, int test_num);
-};
-
-struct extif_timings {
-	int cs_on_time;
-	int cs_off_time;
-	int we_on_time;
-	int we_off_time;
-	int re_on_time;
-	int re_off_time;
-	int we_cycle_time;
-	int re_cycle_time;
-	int cs_pulse_width;
-	int access_time;
-
-	int clk_div;
-
-	u32 tim[5];		/* set by extif->convert_timings */
-
-	int converted;
-};
-
-struct lcd_ctrl_extif {
-	int  (*init)		(struct omapfb_device *fbdev);
-	void (*cleanup)		(void);
-	void (*get_clk_info)	(u32 *clk_period, u32 *max_clk_div);
-	unsigned long (*get_max_tx_rate)(void);
-	int  (*convert_timings)	(struct extif_timings *timings);
-	void (*set_timings)	(const struct extif_timings *timings);
-	void (*set_bits_per_cycle)(int bpc);
-	void (*write_command)	(const void *buf, unsigned int len);
-	void (*read_data)	(void *buf, unsigned int len);
-	void (*write_data)	(const void *buf, unsigned int len);
-	void (*transfer_area)	(int width, int height,
-				 void (callback)(void * data), void *data);
-	int  (*setup_tearsync)	(unsigned pin_cnt,
-				 unsigned hs_pulse_time, unsigned vs_pulse_time,
-				 int hs_pol_inv, int vs_pol_inv, int div);
-	int  (*enable_tearsync) (int enable, unsigned line);
-
-	unsigned long		max_transmit_size;
-};
-
-struct omapfb_notifier_block {
-	struct notifier_block	nb;
-	void			*data;
-	int			plane_idx;
-};
-
-typedef int (*omapfb_notifier_callback_t)(struct notifier_block *,
-					  unsigned long event,
-					  void *fbi);
-
-struct omapfb_mem_region {
-	u32		paddr;
-	void __iomem	*vaddr;
-	unsigned long	size;
-	u8		type;		/* OMAPFB_PLANE_MEM_* */
-	unsigned	alloc:1;	/* allocated by the driver */
-	unsigned	map:1;		/* kernel mapped by the driver */
-};
-
-struct omapfb_mem_desc {
-	int				region_cnt;
-	struct omapfb_mem_region	region[OMAPFB_PLANE_NUM];
-};
-
-struct lcd_ctrl {
-	const char	*name;
-	void		*data;
-
-	int		(*init)		  (struct omapfb_device *fbdev,
-					   int ext_mode,
-					   struct omapfb_mem_desc *req_md);
-	void		(*cleanup)	  (void);
-	void		(*bind_client)	  (struct omapfb_notifier_block *nb);
-	void		(*get_caps)	  (int plane, struct omapfb_caps *caps);
-	int		(*set_update_mode)(enum omapfb_update_mode mode);
-	enum omapfb_update_mode (*get_update_mode)(void);
-	int		(*setup_plane)	  (int plane, int channel_out,
-					   unsigned long offset,
-					   int screen_width,
-					   int pos_x, int pos_y, int width,
-					   int height, int color_mode);
-	int		(*set_rotate)	  (int angle);
-	int		(*setup_mem)	  (int plane, size_t size,
-					   int mem_type, unsigned long *paddr);
-	int		(*mmap)		  (struct fb_info *info,
-					   struct vm_area_struct *vma);
-	int		(*set_scale)	  (int plane,
-					   int orig_width, int orig_height,
-					   int out_width, int out_height);
-	int		(*enable_plane)	  (int plane, int enable);
-	int		(*update_window)  (struct fb_info *fbi,
-					   struct omapfb_update_window *win,
-					   void (*callback)(void *),
-					   void *callback_data);
-	void		(*sync)		  (void);
-	void		(*suspend)	  (void);
-	void		(*resume)	  (void);
-	int		(*run_test)	  (int test_num);
-	int		(*setcolreg)	  (u_int regno, u16 red, u16 green,
-					   u16 blue, u16 transp,
-					   int update_hw_mem);
-	int		(*set_color_key)  (struct omapfb_color_key *ck);
-	int		(*get_color_key)  (struct omapfb_color_key *ck);
-};
-
-enum omapfb_state {
-	OMAPFB_DISABLED	= 0,
-	OMAPFB_SUSPENDED= 99,
-	OMAPFB_ACTIVE	= 100
-};
-
-struct omapfb_plane_struct {
-	int				idx;
-	struct omapfb_plane_info	info;
-	enum omapfb_color_format	color_mode;
-	struct omapfb_device		*fbdev;
-};
-
-struct omapfb_device {
-	int			state;
-	int                     ext_lcdc;               /* Using external
-                                                           LCD controller */
-	struct mutex		rqueue_mutex;
-
-	int			palette_size;
-	u32			pseudo_palette[17];
-
-	struct lcd_panel	*panel;			/* LCD panel */
-	const struct lcd_ctrl	*ctrl;			/* LCD controller */
-	const struct lcd_ctrl	*int_ctrl;		/* internal LCD ctrl */
-	struct lcd_ctrl_extif	*ext_if;		/* LCD ctrl external
-							   interface */
-	struct device		*dev;
-	struct fb_var_screeninfo	new_var;	/* for mode changes */
-
-	struct omapfb_mem_desc		mem_desc;
-	struct fb_info			*fb_info[OMAPFB_PLANE_NUM];
-};
-
-struct omapfb_platform_data {
-	struct omap_lcd_config		lcd;
-	struct omapfb_mem_desc		mem_desc;
-	void				*ctrl_platform_data;
-};
-
-#ifdef CONFIG_ARCH_OMAP1
-extern struct lcd_ctrl omap1_lcd_ctrl;
-#else
-extern struct lcd_ctrl omap2_disp_ctrl;
-#endif
-
-extern void omapfb_reserve_sdram(void);
-extern void omapfb_register_panel(struct lcd_panel *panel);
-extern void omapfb_write_first_pixel(struct omapfb_device *fbdev, u16 pixval);
-extern void omapfb_notify_clients(struct omapfb_device *fbdev,
-				  unsigned long event);
-extern int  omapfb_register_client(struct omapfb_notifier_block *nb,
-				   omapfb_notifier_callback_t callback,
-				   void *callback_data);
-extern int  omapfb_unregister_client(struct omapfb_notifier_block *nb);
-extern int  omapfb_update_window_async(struct fb_info *fbi,
-				       struct omapfb_update_window *win,
-				       void (*callback)(void *),
-				       void *callback_data);
-
-/* in arch/arm/plat-omap/fb.c */
-extern void omapfb_set_ctrl_platform_data(void *pdata);
-
-#endif /* __KERNEL__ */
-
-#endif /* __OMAPFB_H */
diff --git a/arch/arm/plat-omap/include/plat/sdrc.h b/arch/arm/plat-omap/include/plat/sdrc.h
index f704030..7b76f50 100644
--- a/arch/arm/plat-omap/include/plat/sdrc.h
+++ b/arch/arm/plat-omap/include/plat/sdrc.h
@@ -94,7 +94,10 @@
 
 /* SMS register offsets - read/write with sms_{read,write}_reg() */
 
-#define SMS_SYSCONFIG		0x010
+#define SMS_SYSCONFIG			0x010
+#define SMS_ROT_CONTROL(context)	(0x180 + 0x10 * (context))
+#define SMS_ROT_SIZE(context)		(0x184 + 0x10 * (context))
+#define SMS_ROT_PHYSICAL_BA(context)	(0x188 + 0x10 * (context))
 /* REVISIT: fill in other SMS registers here */
 
 
@@ -129,6 +132,10 @@
 void omap2_sms_save_context(void);
 void omap2_sms_restore_context(void);
 
+void omap2_sms_write_rot_control(u32 val, unsigned ctx);
+void omap2_sms_write_rot_size(u32 val, unsigned ctx);
+void omap2_sms_write_rot_physical_ba(u32 val, unsigned ctx);
+
 #ifdef CONFIG_ARCH_OMAP2
 
 struct memory_timings {
diff --git a/arch/arm/plat-omap/include/plat/vram.h b/arch/arm/plat-omap/include/plat/vram.h
new file mode 100644
index 0000000..edd4987
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/vram.h
@@ -0,0 +1,62 @@
+/*
+ * VRAM manager for OMAP
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#ifndef __OMAP_VRAM_H__
+#define __OMAP_VRAM_H__
+
+#include <linux/types.h>
+
+#define OMAP_VRAM_MEMTYPE_SDRAM		0
+#define OMAP_VRAM_MEMTYPE_SRAM		1
+#define OMAP_VRAM_MEMTYPE_MAX		1
+
+extern int omap_vram_add_region(unsigned long paddr, size_t size);
+extern int omap_vram_free(unsigned long paddr, size_t size);
+extern int omap_vram_alloc(int mtype, size_t size, unsigned long *paddr);
+extern int omap_vram_reserve(unsigned long paddr, size_t size);
+extern void omap_vram_get_info(unsigned long *vram, unsigned long *free_vram,
+		unsigned long *largest_free_block);
+
+#ifdef CONFIG_OMAP2_VRAM
+extern void omap_vram_set_sdram_vram(u32 size, u32 start);
+extern void omap_vram_set_sram_vram(u32 size, u32 start);
+
+extern void omap_vram_reserve_sdram(void);
+extern unsigned long omap_vram_reserve_sram(unsigned long sram_pstart,
+					    unsigned long sram_vstart,
+					    unsigned long sram_size,
+					    unsigned long pstart_avail,
+					    unsigned long size_avail);
+#else
+static inline void omap_vram_set_sdram_vram(u32 size, u32 start) { }
+static inline void omap_vram_set_sram_vram(u32 size, u32 start) { }
+
+static inline void omap_vram_reserve_sdram(void) { }
+static inline unsigned long omap_vram_reserve_sram(unsigned long sram_pstart,
+					    unsigned long sram_vstart,
+					    unsigned long sram_size,
+					    unsigned long pstart_avail,
+					    unsigned long size_avail)
+{
+	return 0;
+}
+#endif
+
+#endif
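
A hypothetical consumer, to show the allocator's calling convention; the
buffer size and its use are illustrative, not from the patch:

	#include <plat/vram.h>	/* the header added above */

	static int example_alloc_fb(unsigned long *paddr)
	{
		size_t size = 800 * 480 * 2;	/* one 16bpp 800x480 frame, say */
		int r;

		r = omap_vram_alloc(OMAP_VRAM_MEMTYPE_SDRAM, size, paddr);
		if (r)
			return r;	/* no suitable VRAM region left */

		/* the caller would ioremap() *paddr and draw into it */

		omap_vram_free(*paddr, size);
		return 0;
	}
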
diff --git a/arch/arm/plat-omap/include/plat/vrfb.h b/arch/arm/plat-omap/include/plat/vrfb.h
new file mode 100644
index 0000000..d8a03ce
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/vrfb.h
@@ -0,0 +1,50 @@
+/*
+ * VRFB Rotation Engine
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#ifndef __OMAP_VRFB_H__
+#define __OMAP_VRFB_H__
+
+#define OMAP_VRFB_LINE_LEN 2048
+
+struct vrfb {
+	u8 context;
+	void __iomem *vaddr[4];
+	unsigned long paddr[4];
+	u16 xres;
+	u16 yres;
+	u16 xoffset;
+	u16 yoffset;
+	u8 bytespp;
+	bool yuv_mode;
+};
+
+extern int omap_vrfb_request_ctx(struct vrfb *vrfb);
+extern void omap_vrfb_release_ctx(struct vrfb *vrfb);
+extern void omap_vrfb_adjust_size(u16 *width, u16 *height,
+		u8 bytespp);
+extern u32 omap_vrfb_min_phys_size(u16 width, u16 height, u8 bytespp);
+extern u16 omap_vrfb_max_height(u32 phys_size, u16 width, u8 bytespp);
+extern void omap_vrfb_setup(struct vrfb *vrfb, unsigned long paddr,
+		u16 width, u16 height,
+		unsigned bytespp, bool yuv_mode);
+extern int omap_vrfb_map_angle(struct vrfb *vrfb, u16 height, u8 rot);
+extern void omap_vrfb_restore_context(void);
+
+#endif /* __OMAP_VRFB_H__ */
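
Likewise, a sketch of the rotation engine's expected call sequence,
inferred from the declarations above; the geometry, angle, and error
handling are made up:

	#include <plat/vrfb.h>	/* the header added above */

	static int example_rotate(unsigned long paddr)
	{
		struct vrfb vrfb;
		int r;

		r = omap_vrfb_request_ctx(&vrfb);
		if (r)
			return r;	/* all rotation contexts in use */

		/* 800x480 buffer, 2 bytes per pixel, RGB rather than YUV */
		omap_vrfb_setup(&vrfb, paddr, 800, 480, 2, false);

		/* map the 90-degree aperture; reads through vrfb.vaddr[1]
		 * then come back rotated */
		r = omap_vrfb_map_angle(&vrfb, 480, 1);

		omap_vrfb_release_ctx(&vrfb);
		return r;
	}
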
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 3e92366..ad2bf07 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -28,6 +28,7 @@
 #include <plat/sram.h>
 #include <plat/board.h>
 #include <plat/cpu.h>
+#include <plat/vram.h>
 
 #include <plat/control.h>
 
@@ -185,6 +186,13 @@
 				       omap_sram_start + SRAM_BOOTLOADER_SZ,
 				       omap_sram_size - SRAM_BOOTLOADER_SZ);
 	omap_sram_size -= reserved;
+
+	reserved = omap_vram_reserve_sram(omap_sram_start, omap_sram_base,
+			omap_sram_size,
+			omap_sram_start + SRAM_BOOTLOADER_SZ,
+			omap_sram_size - SRAM_BOOTLOADER_SZ);
+	omap_sram_size -= reserved;
+
 	omap_sram_ceil = omap_sram_base + omap_sram_size;
 }
 
diff --git a/arch/avr32/include/asm/syscalls.h b/arch/avr32/include/asm/syscalls.h
index 483d666..66a1972 100644
--- a/arch/avr32/include/asm/syscalls.h
+++ b/arch/avr32/include/asm/syscalls.h
@@ -29,10 +29,6 @@
 			       struct pt_regs *);
 asmlinkage int sys_rt_sigreturn(struct pt_regs *);
 
-/* kernel/sys_avr32.c */
-asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
-			  unsigned long, unsigned long, off_t);
-
 /* mm/cache.c */
 asmlinkage int sys_cacheflush(int, void __user *, size_t);
 
diff --git a/arch/avr32/kernel/sys_avr32.c b/arch/avr32/kernel/sys_avr32.c
index 5d2daea..459349b 100644
--- a/arch/avr32/kernel/sys_avr32.c
+++ b/arch/avr32/kernel/sys_avr32.c
@@ -5,39 +5,8 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/mm.h>
 #include <linux/unistd.h>
 
-#include <asm/mman.h>
-#include <asm/uaccess.h>
-#include <asm/syscalls.h>
-
-asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
-			  unsigned long prot, unsigned long flags,
-			  unsigned long fd, off_t offset)
-{
-	int error = -EBADF;
-	struct file *file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			return error;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, offset);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
-	return error;
-}
-
 int kernel_execve(const char *file, char **argv, char **envp)
 {
 	register long scno asm("r8") = __NR_execve;
diff --git a/arch/avr32/kernel/syscall-stubs.S b/arch/avr32/kernel/syscall-stubs.S
index f7244cd..0447a3e 100644
--- a/arch/avr32/kernel/syscall-stubs.S
+++ b/arch/avr32/kernel/syscall-stubs.S
@@ -61,7 +61,7 @@
 __sys_mmap2:
 	pushm	lr
 	st.w	--sp, ARG6
-	call	sys_mmap2
+	call	sys_mmap_pgoff
 	sub	sp, -4
 	popm	pc
 
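From here on, every architecture deletes a nearly identical
do_mmap2()/sys_mmap2() body and calls the new common sys_mmap_pgoff()
instead. That helper is introduced elsewhere in this series (not shown in
this excerpt); presumably it hoists exactly the pattern being removed,
roughly:

	/*
	 * Sketch of the consolidated helper, reconstructed from the
	 * per-arch bodies deleted below; the real version also handles
	 * MAP_HUGETLB, which is omitted here.
	 */
	SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
			unsigned long, prot, unsigned long, flags,
			unsigned long, fd, unsigned long, pgoff)
	{
		struct file *file = NULL;
		unsigned long retval = -EBADF;

		flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
		if (!(flags & MAP_ANONYMOUS)) {
			file = fget(fd);
			if (!file)
				goto out;
		}

		down_write(&current->mm->mmap_sem);
		retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
		up_write(&current->mm->mmap_sem);

		if (file)
			fput(file);
	out:
		return retval;
	}

Arch quirks (frv's constant mmap2 shift, mn10300's low-address limit)
remain at the call sites or move into arch hooks, which is the point of
the consolidation.
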
diff --git a/arch/blackfin/kernel/sys_bfin.c b/arch/blackfin/kernel/sys_bfin.c
index afcef12..2e7f8e1 100644
--- a/arch/blackfin/kernel/sys_bfin.c
+++ b/arch/blackfin/kernel/sys_bfin.c
@@ -22,39 +22,6 @@
 #include <asm/cacheflush.h>
 #include <asm/dma.h>
 
-/* common code for old and new mmaps */
-static inline long
-do_mmap2(unsigned long addr, unsigned long len,
-	 unsigned long prot, unsigned long flags,
-	 unsigned long fd, unsigned long pgoff)
-{
-	int error = -EBADF;
-	struct file *file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
- out:
-	return error;
-}
-
-asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
-			  unsigned long prot, unsigned long flags,
-			  unsigned long fd, unsigned long pgoff)
-{
-	return do_mmap2(addr, len, prot, flags, fd, pgoff);
-}
-
 asmlinkage void *sys_sram_alloc(size_t size, unsigned long flags)
 {
 	return sram_alloc_with_lsl(size, flags);
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index a50637a..f3f8bb4 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -1422,7 +1422,7 @@
 	.long _sys_ni_syscall	/* streams2 */
 	.long _sys_vfork		/* 190 */
 	.long _sys_getrlimit
-	.long _sys_mmap2
+	.long _sys_mmap_pgoff
 	.long _sys_truncate64
 	.long _sys_ftruncate64
 	.long _sys_stat64	/* 195 */
diff --git a/arch/cris/kernel/sys_cris.c b/arch/cris/kernel/sys_cris.c
index 2ad962c..c2bbb1a 100644
--- a/arch/cris/kernel/sys_cris.c
+++ b/arch/cris/kernel/sys_cris.c
@@ -26,31 +26,6 @@
 #include <asm/uaccess.h>
 #include <asm/segment.h>
 
-/* common code for old and new mmaps */
-static inline long
-do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
-        unsigned long flags, unsigned long fd, unsigned long pgoff)
-{
-        int error = -EBADF;
-        struct file * file = NULL;
-
-        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-        if (!(flags & MAP_ANONYMOUS)) {
-                file = fget(fd);
-                if (!file)
-                        goto out;
-        }
-
-        down_write(&current->mm->mmap_sem);
-        error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-        up_write(&current->mm->mmap_sem);
-
-        if (file)
-                fput(file);
-out:
-        return error;
-}
-
 asmlinkage unsigned long old_mmap(unsigned long __user *args)
 {        
 	unsigned long buffer[6];
@@ -63,7 +38,7 @@
 	if (buffer[5] & ~PAGE_MASK) /* verify that offset is on page boundary */
 		goto out;
 
-	err = do_mmap2(buffer[0], buffer[1], buffer[2], buffer[3],
+	err = sys_mmap_pgoff(buffer[0], buffer[1], buffer[2], buffer[3],
                        buffer[4], buffer[5] >> PAGE_SHIFT);
 out:
 	return err;
@@ -73,7 +48,8 @@
 sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
           unsigned long flags, unsigned long fd, unsigned long pgoff)
 {
-        return do_mmap2(addr, len, prot, flags, fd, pgoff);
+	/* bug(?): 8Kb pages here */
+        return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
 }
 
 /*
diff --git a/arch/frv/kernel/sys_frv.c b/arch/frv/kernel/sys_frv.c
index 2b6b528..1d3d4c9 100644
--- a/arch/frv/kernel/sys_frv.c
+++ b/arch/frv/kernel/sys_frv.c
@@ -31,9 +31,6 @@
 			  unsigned long prot, unsigned long flags,
 			  unsigned long fd, unsigned long pgoff)
 {
-	int error = -EBADF;
-	struct file * file = NULL;
-
 	/* As with sparc32, make sure the shift for mmap2 is constant
 	   (12), no matter what PAGE_SIZE we have.... */
 
@@ -41,70 +38,11 @@
 	   trying to map something we can't */
 	if (pgoff & ((1 << (PAGE_SHIFT - 12)) - 1))
 		return -EINVAL;
-	pgoff >>= PAGE_SHIFT - 12;
 
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
-out:
-	return error;
+	return sys_mmap_pgoff(addr, len, prot, flags, fd,
+			      pgoff >> (PAGE_SHIFT - 12));
 }
 
-#if 0 /* DAVIDM - do we want this */
-struct mmap_arg_struct64 {
-	__u32 addr;
-	__u32 len;
-	__u32 prot;
-	__u32 flags;
-	__u64 offset; /* 64 bits */
-	__u32 fd;
-};
-
-asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
-{
-	int error = -EFAULT;
-	struct file * file = NULL;
-	struct mmap_arg_struct64 a;
-	unsigned long pgoff;
-
-	if (copy_from_user(&a, arg, sizeof(a)))
-		return -EFAULT;
-
-	if ((long)a.offset & ~PAGE_MASK)
-		return -EINVAL;
-
-	pgoff = a.offset >> PAGE_SHIFT;
-	if ((a.offset >> PAGE_SHIFT) != pgoff)
-		return -EINVAL;
-
-	if (!(a.flags & MAP_ANONYMOUS)) {
-		error = -EBADF;
-		file = fget(a.fd);
-		if (!file)
-			goto out;
-	}
-	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-	if (file)
-		fput(file);
-out:
-	return error;
-}
-#endif
-
 /*
  * sys_ipc() is the de-multiplexer for the SysV IPC calls..
  *
diff --git a/arch/h8300/kernel/sys_h8300.c b/arch/h8300/kernel/sys_h8300.c
index 8cb5d73..b5969db 100644
--- a/arch/h8300/kernel/sys_h8300.c
+++ b/arch/h8300/kernel/sys_h8300.c
@@ -26,39 +26,6 @@
 #include <asm/traps.h>
 #include <asm/unistd.h>
 
-/* common code for old and new mmaps */
-static inline long do_mmap2(
-	unsigned long addr, unsigned long len,
-	unsigned long prot, unsigned long flags,
-	unsigned long fd, unsigned long pgoff)
-{
-	int error = -EBADF;
-	struct file * file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
-out:
-	return error;
-}
-
-asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
-	unsigned long prot, unsigned long flags,
-	unsigned long fd, unsigned long pgoff)
-{
-	return do_mmap2(addr, len, prot, flags, fd, pgoff);
-}
-
 /*
  * Perform the select(nd, in, out, ex, tv) and mmap() system
  * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
@@ -87,58 +54,12 @@
 	if (a.offset & ~PAGE_MASK)
 		goto out;
 
-	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
-	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
+	error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
+			       a.offset >> PAGE_SHIFT);
 out:
 	return error;
 }
 
-#if 0 /* DAVIDM - do we want this */
-struct mmap_arg_struct64 {
-	__u32 addr;
-	__u32 len;
-	__u32 prot;
-	__u32 flags;
-	__u64 offset; /* 64 bits */
-	__u32 fd;
-};
-
-asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
-{
-	int error = -EFAULT;
-	struct file * file = NULL;
-	struct mmap_arg_struct64 a;
-	unsigned long pgoff;
-
-	if (copy_from_user(&a, arg, sizeof(a)))
-		return -EFAULT;
-
-	if ((long)a.offset & ~PAGE_MASK)
-		return -EINVAL;
-
-	pgoff = a.offset >> PAGE_SHIFT;
-	if ((a.offset >> PAGE_SHIFT) != pgoff)
-		return -EINVAL;
-
-	if (!(a.flags & MAP_ANONYMOUS)) {
-		error = -EBADF;
-		file = fget(a.fd);
-		if (!file)
-			goto out;
-	}
-	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-	if (file)
-		fput(file);
-out:
-	return error;
-}
-#endif
-
 struct sel_arg_struct {
 	unsigned long n;
 	fd_set *inp, *outp, *exp;
diff --git a/arch/h8300/kernel/syscalls.S b/arch/h8300/kernel/syscalls.S
index 4eb67fa..2d69881 100644
--- a/arch/h8300/kernel/syscalls.S
+++ b/arch/h8300/kernel/syscalls.S
@@ -206,7 +206,7 @@
 	.long SYMBOL_NAME(sys_ni_syscall)	/* streams2 */
 	.long SYMBOL_NAME(sys_vfork)            /* 190 */
 	.long SYMBOL_NAME(sys_getrlimit)
-	.long SYMBOL_NAME(sys_mmap2)
+	.long SYMBOL_NAME(sys_mmap_pgoff)
 	.long SYMBOL_NAME(sys_truncate64)
 	.long SYMBOL_NAME(sys_ftruncate64)
 	.long SYMBOL_NAME(sys_stat64)		/* 195 */
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 429ec96..045b746 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -858,6 +858,9 @@
 
 	prot = get_prot32(prot);
 
+	if (flags & MAP_HUGETLB)
+		return -ENOMEM;
+
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
 	mutex_lock(&ia32_mmap_mutex);
 	{
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h
index 88afb54..67455c2 100644
--- a/arch/ia64/include/asm/xen/hypervisor.h
+++ b/arch/ia64/include/asm/xen/hypervisor.h
@@ -37,35 +37,9 @@
 #include <xen/interface/xen.h>
 #include <xen/interface/version.h>	/* to compile feature.c */
 #include <xen/features.h>		/* to compile xen-netfront.c */
+#include <xen/xen.h>
 #include <asm/xen/hypercall.h>
 
-/* xen_domain_type is set before executing any C code by early_xen_setup */
-enum xen_domain_type {
-	XEN_NATIVE,	/* running on bare hardware */
-	XEN_PV_DOMAIN,	/* running in a PV domain */
-	XEN_HVM_DOMAIN,	/* running in a Xen hvm domain*/
-};
-
-#ifdef CONFIG_XEN
-extern enum xen_domain_type xen_domain_type;
-#else
-#define xen_domain_type		XEN_NATIVE
-#endif
-
-#define xen_domain()		(xen_domain_type != XEN_NATIVE)
-#define xen_pv_domain()		(xen_domain() &&			\
-				 xen_domain_type == XEN_PV_DOMAIN)
-#define xen_hvm_domain()	(xen_domain() &&			\
-				 xen_domain_type == XEN_HVM_DOMAIN)
-
-#ifdef CONFIG_XEN_DOM0
-#define xen_initial_domain()	(xen_pv_domain() &&			\
-				 (xen_start_info->flags & SIF_INITDOMAIN))
-#else
-#define xen_initial_domain()	(0)
-#endif
-
-
 #ifdef CONFIG_XEN
 extern struct shared_info *HYPERVISOR_shared_info;
 extern struct start_info *xen_start_info;
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index 92ed83f..609d500 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -100,51 +100,7 @@
 asmlinkage unsigned long
 ia64_brk (unsigned long brk)
 {
-	unsigned long rlim, retval, newbrk, oldbrk;
-	struct mm_struct *mm = current->mm;
-
-	/*
-	 * Most of this replicates the code in sys_brk() except for an additional safety
-	 * check and the clearing of r8.  However, we can't call sys_brk() because we need
-	 * to acquire the mmap_sem before we can do the test...
-	 */
-	down_write(&mm->mmap_sem);
-
-	if (brk < mm->end_code)
-		goto out;
-	newbrk = PAGE_ALIGN(brk);
-	oldbrk = PAGE_ALIGN(mm->brk);
-	if (oldbrk == newbrk)
-		goto set_brk;
-
-	/* Always allow shrinking brk. */
-	if (brk <= mm->brk) {
-		if (!do_munmap(mm, newbrk, oldbrk-newbrk))
-			goto set_brk;
-		goto out;
-	}
-
-	/* Check against unimplemented/unmapped addresses: */
-	if ((newbrk - oldbrk) > RGN_MAP_LIMIT || REGION_OFFSET(newbrk) > RGN_MAP_LIMIT)
-		goto out;
-
-	/* Check against rlimit.. */
-	rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
-	if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
-		goto out;
-
-	/* Check against existing mmap mappings. */
-	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
-		goto out;
-
-	/* Ok, looks good - let it rip. */
-	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
-		goto out;
-set_brk:
-	mm->brk = brk;
-out:
-	retval = mm->brk;
-	up_write(&mm->mmap_sem);
+	unsigned long retval = sys_brk(brk);
 	force_successful_syscall_return();
 	return retval;
 }
@@ -185,39 +141,6 @@
 	return 0;
 }
 
-static inline unsigned long
-do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, unsigned long pgoff)
-{
-	struct file *file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			return -EBADF;
-
-		if (!file->f_op || !file->f_op->mmap) {
-			addr = -ENODEV;
-			goto out;
-		}
-	}
-
-	/* Careful about overflows.. */
-	len = PAGE_ALIGN(len);
-	if (!len || len > TASK_SIZE) {
-		addr = -EINVAL;
-		goto out;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
-out:	if (file)
-		fput(file);
-	return addr;
-}
-
 /*
  * mmap2() is like mmap() except that the offset is expressed in units
  * of PAGE_SIZE (instead of bytes).  This allows to mmap2() (pieces
@@ -226,7 +149,7 @@
 asmlinkage unsigned long
 sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff)
 {
-	addr = do_mmap2(addr, len, prot, flags, fd, pgoff);
+	addr = sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
 	if (!IS_ERR((void *) addr))
 		force_successful_syscall_return();
 	return addr;
@@ -238,7 +161,7 @@
 	if (offset_in_page(off) != 0)
 		return -EINVAL;
 
-	addr = do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
+	addr = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
 	if (!IS_ERR((void *) addr))
 		force_successful_syscall_return();
 	return addr;
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index c0fca2c..df639db7 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -131,6 +131,7 @@
 }
 
 struct pci_root_info {
+	struct acpi_device *bridge;
 	struct pci_controller *controller;
 	char *name;
 };
@@ -297,9 +298,20 @@
 	window->offset = offset;
 
 	if (insert_resource(root, &window->resource)) {
-		printk(KERN_ERR "alloc 0x%llx-0x%llx from %s for %s failed\n",
-			window->resource.start, window->resource.end,
-			root->name, info->name);
+		dev_err(&info->bridge->dev,
+			"can't allocate host bridge window %pR\n",
+			&window->resource);
+	} else {
+		if (offset)
+			dev_info(&info->bridge->dev, "host bridge window %pR "
+				 "(PCI address [%#llx-%#llx])\n",
+				 &window->resource,
+				 window->resource.start - offset,
+				 window->resource.end - offset);
+		else
+			dev_info(&info->bridge->dev,
+				 "host bridge window %pR\n",
+				 &window->resource);
 	}
 
 	return AE_OK;
@@ -319,8 +331,9 @@
 		    (res->end - res->start < 16))
 			continue;
 		if (j >= PCI_BUS_NUM_RESOURCES) {
-			printk("Ignoring range [%#llx-%#llx] (%lx)\n",
-					res->start, res->end, res->flags);
+			dev_warn(&bus->dev,
+				 "ignoring host bridge window %pR (no space)\n",
+				 res);
 			continue;
 		}
 		bus->resource[j++] = res;
@@ -364,6 +377,7 @@
 			goto out3;
 
 		sprintf(name, "PCI Bus %04x:%02x", domain, bus);
+		info.bridge = device;
 		info.controller = controller;
 		info.name = name;
 		acpi_walk_resources(device->handle, METHOD_NAME__CRS,
@@ -720,9 +734,6 @@
 	return ret;
 }
 
-/* It's defined in drivers/pci/pci.c */
-extern u8 pci_cache_line_size;
-
 /**
  * set_pci_cacheline_size - determine cacheline size for PCI devices
  *
@@ -731,7 +742,7 @@
  *
  * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
  */
-static void __init set_pci_cacheline_size(void)
+static void __init set_pci_dfl_cacheline_size(void)
 {
 	unsigned long levels, unique_caches;
 	long status;
@@ -751,7 +762,7 @@
 			"(status=%ld)\n", __func__, status);
 		return;
 	}
-	pci_cache_line_size = (1 << cci.pcci_line_size) / 4;
+	pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
 }
 
 u64 ia64_dma_get_required_mask(struct device *dev)
@@ -782,7 +793,7 @@
 
 static int __init pcibios_init(void)
 {
-	set_pci_cacheline_size();
+	set_pci_dfl_cacheline_size();
 	return 0;
 }
 
diff --git a/arch/m32r/kernel/sys_m32r.c b/arch/m32r/kernel/sys_m32r.c
index 305ac85..d3c865c 100644
--- a/arch/m32r/kernel/sys_m32r.c
+++ b/arch/m32r/kernel/sys_m32r.c
@@ -76,30 +76,6 @@
 	return oldval;
 }
 
-asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
-	unsigned long prot, unsigned long flags,
-	unsigned long fd, unsigned long pgoff)
-{
-	int error = -EBADF;
-	struct file *file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
-out:
-	return error;
-}
-
 /*
  * sys_ipc() is the de-multiplexer for the SysV IPC calls..
  *
diff --git a/arch/m32r/kernel/syscall_table.S b/arch/m32r/kernel/syscall_table.S
index aa3bf4c..60536e2 100644
--- a/arch/m32r/kernel/syscall_table.S
+++ b/arch/m32r/kernel/syscall_table.S
@@ -191,7 +191,7 @@
 	.long sys_ni_syscall		/* streams2 */
 	.long sys_vfork			/* 190 */
 	.long sys_getrlimit
-	.long sys_mmap2
+	.long sys_mmap_pgoff
 	.long sys_truncate64
 	.long sys_ftruncate64
 	.long sys_stat64		/* 195 */
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index 7deb402..218f441 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -29,37 +29,16 @@
 #include <asm/page.h>
 #include <asm/unistd.h>
 
-/* common code for old and new mmaps */
-static inline long do_mmap2(
-	unsigned long addr, unsigned long len,
-	unsigned long prot, unsigned long flags,
-	unsigned long fd, unsigned long pgoff)
-{
-	int error = -EBADF;
-	struct file * file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
-out:
-	return error;
-}
-
 asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
 	unsigned long prot, unsigned long flags,
 	unsigned long fd, unsigned long pgoff)
 {
-	return do_mmap2(addr, len, prot, flags, fd, pgoff);
+	/*
+	 * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
+	 * so we need to shift the argument down by 1; m68k mmap64(3)
+	 * (in libc) expects the last argument of mmap2 in 4Kb units.
+	 */
+	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
 }
 
 /*
@@ -90,58 +69,12 @@
 	if (a.offset & ~PAGE_MASK)
 		goto out;
 
-	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
-	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
+	error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
+			       a.offset >> PAGE_SHIFT);
 out:
 	return error;
 }
 
-#if 0
-struct mmap_arg_struct64 {
-	__u32 addr;
-	__u32 len;
-	__u32 prot;
-	__u32 flags;
-	__u64 offset; /* 64 bits */
-	__u32 fd;
-};
-
-asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
-{
-	int error = -EFAULT;
-	struct file * file = NULL;
-	struct mmap_arg_struct64 a;
-	unsigned long pgoff;
-
-	if (copy_from_user(&a, arg, sizeof(a)))
-		return -EFAULT;
-
-	if ((long)a.offset & ~PAGE_MASK)
-		return -EINVAL;
-
-	pgoff = a.offset >> PAGE_SHIFT;
-	if ((a.offset >> PAGE_SHIFT) != pgoff)
-		return -EINVAL;
-
-	if (!(a.flags & MAP_ANONYMOUS)) {
-		error = -EBADF;
-		file = fget(a.fd);
-		if (!file)
-			goto out;
-	}
-	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-	if (file)
-		fput(file);
-out:
-	return error;
-}
-#endif
-
 struct sel_arg_struct {
 	unsigned long n;
 	fd_set __user *inp, *outp, *exp;
diff --git a/arch/m68knommu/kernel/sys_m68k.c b/arch/m68knommu/kernel/sys_m68k.c
index efdd090..b67cbc7 100644
--- a/arch/m68knommu/kernel/sys_m68k.c
+++ b/arch/m68knommu/kernel/sys_m68k.c
@@ -27,39 +27,6 @@
 #include <asm/cacheflush.h>
 #include <asm/unistd.h>
 
-/* common code for old and new mmaps */
-static inline long do_mmap2(
-	unsigned long addr, unsigned long len,
-	unsigned long prot, unsigned long flags,
-	unsigned long fd, unsigned long pgoff)
-{
-	int error = -EBADF;
-	struct file * file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
-out:
-	return error;
-}
-
-asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
-	unsigned long prot, unsigned long flags,
-	unsigned long fd, unsigned long pgoff)
-{
-	return do_mmap2(addr, len, prot, flags, fd, pgoff);
-}
-
 /*
  * Perform the select(nd, in, out, ex, tv) and mmap() system
  * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
@@ -88,9 +55,8 @@
 	if (a.offset & ~PAGE_MASK)
 		goto out;
 
-	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
-	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
+	error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
+				a.offset >> PAGE_SHIFT);
 out:
 	return error;
 }
diff --git a/arch/m68knommu/kernel/syscalltable.S b/arch/m68knommu/kernel/syscalltable.S
index 23535cc..486837e 100644
--- a/arch/m68knommu/kernel/syscalltable.S
+++ b/arch/m68knommu/kernel/syscalltable.S
@@ -210,7 +210,7 @@
 	.long sys_ni_syscall	/* streams2 */
 	.long sys_vfork		/* 190 */
 	.long sys_getrlimit
-	.long sys_mmap2
+	.long sys_mmap_pgoff
 	.long sys_truncate64
 	.long sys_ftruncate64
 	.long sys_stat64	/* 195 */
diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c
index 07cabed..9f3c205 100644
--- a/arch/microblaze/kernel/sys_microblaze.c
+++ b/arch/microblaze/kernel/sys_microblaze.c
@@ -62,46 +62,14 @@
 	return error;
 }
 
-asmlinkage long
-sys_mmap2(unsigned long addr, unsigned long len,
-	unsigned long prot, unsigned long flags,
-	unsigned long fd, unsigned long pgoff)
-{
-	struct file *file = NULL;
-	int ret = -EBADF;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file) {
-			printk(KERN_INFO "no fd in mmap\r\n");
-			goto out;
-		}
-	}
-
-	down_write(&current->mm->mmap_sem);
-	ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-	if (file)
-		fput(file);
-out:
-	return ret;
-}
-
 asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
 			unsigned long prot, unsigned long flags,
 			unsigned long fd, off_t pgoff)
 {
-	int err = -EINVAL;
+	if (pgoff & ~PAGE_MASK)
+		return -EINVAL;
 
-	if (pgoff & ~PAGE_MASK) {
-		printk(KERN_INFO "no pagemask in mmap\r\n");
-		goto out;
-	}
-
-	err = sys_mmap2(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT);
-out:
-	return err;
+	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT);
 }
 
 /*
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index c1ab1dc..b96f365 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -196,7 +196,7 @@
 	.long sys_ni_syscall		/* reserved for streams2 */
 	.long sys_vfork		/* 190 */
 	.long sys_getrlimit
-	.long sys_mmap2			/* mmap2 */
+	.long sys_mmap_pgoff		/* mmap2 */
 	.long sys_truncate64
 	.long sys_ftruncate64
 	.long sys_stat64		/* 195 */
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 1a2793e..f042563 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -67,28 +67,13 @@
 	unsigned long, prot, unsigned long, flags, unsigned long, fd,
 	unsigned long, pgoff)
 {
-	struct file * file = NULL;
 	unsigned long error;
 
 	error = -EINVAL;
 	if (pgoff & (~PAGE_MASK >> 12))
 		goto out;
-	pgoff >>= PAGE_SHIFT-12;
-
-	if (!(flags & MAP_ANONYMOUS)) {
-		error = -EBADF;
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-	if (file)
-		fput(file);
-
+	error = sys_mmap_pgoff(addr, len, prot, flags, fd,
+			       pgoff >> (PAGE_SHIFT-12));
 out:
 	return error;
 }
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index fe0d798..3f7f466 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -93,7 +93,8 @@
 		 * We do not accept a shared mapping if it would violate
 		 * cache aliasing constraints.
 		 */
-		if ((flags & MAP_SHARED) && (addr & shm_align_mask))
+		if ((flags & MAP_SHARED) &&
+		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
 			return -EINVAL;
 		return addr;
 	}
@@ -129,31 +130,6 @@
 	}
 }
 
-/* common code for old and new mmaps */
-static inline unsigned long
-do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
-        unsigned long flags, unsigned long fd, unsigned long pgoff)
-{
-	unsigned long error = -EBADF;
-	struct file * file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
-out:
-	return error;
-}
-
 SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
 	unsigned long, prot, unsigned long, flags, unsigned long,
 	fd, off_t, offset)
@@ -164,7 +140,7 @@
 	if (offset & ~PAGE_MASK)
 		goto out;
 
-	result = do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+	result = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
 
 out:
 	return result;
@@ -177,7 +153,7 @@
 	if (pgoff & (~PAGE_MASK >> 12))
 		return -EINVAL;
 
-	return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12));
+	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12));
 }
 
 save_static_function(sys_fork);
diff --git a/arch/mn10300/include/asm/mman.h b/arch/mn10300/include/asm/mman.h
index 8eebf89..db5c53da 100644
--- a/arch/mn10300/include/asm/mman.h
+++ b/arch/mn10300/include/asm/mman.h
@@ -1 +1,6 @@
 #include <asm-generic/mman.h>
+
+#define MIN_MAP_ADDR	PAGE_SIZE	/* minimum fixed mmap address */
+
+#define arch_mmap_check(addr, len, flags) \
+	(((flags) & MAP_FIXED && (addr) < MIN_MAP_ADDR) ? -EINVAL : 0)
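
The generic mmap path consults arch_mmap_check() before creating a
mapping, so moving mn10300's MIN_MAP_ADDR test into this macro lets the
private sys_mmap2() below be deleted without losing the restriction.
Roughly, under the assumption the hook is called early in the common path:

	/* sketch only: the real call sits in common mmap code (mm/mmap.c) */
	static inline int example_mmap_entry(unsigned long addr,
					     unsigned long len,
					     unsigned long flags)
	{
		int error = arch_mmap_check(addr, len, flags);
		if (error)
			return error;	/* e.g. MAP_FIXED below MIN_MAP_ADDR */
		/* proceed with the mapping */
		return 0;
	}
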
diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S
index a94e7ea..c9ee6c0 100644
--- a/arch/mn10300/kernel/entry.S
+++ b/arch/mn10300/kernel/entry.S
@@ -578,7 +578,7 @@
 	.long sys_ni_syscall	/* reserved for streams2 */
 	.long sys_vfork		/* 190 */
 	.long sys_getrlimit
-	.long sys_mmap2
+	.long sys_mmap_pgoff
 	.long sys_truncate64
 	.long sys_ftruncate64
 	.long sys_stat64	/* 195 */
diff --git a/arch/mn10300/kernel/sys_mn10300.c b/arch/mn10300/kernel/sys_mn10300.c
index 8ca5af0..17cc6ce 100644
--- a/arch/mn10300/kernel/sys_mn10300.c
+++ b/arch/mn10300/kernel/sys_mn10300.c
@@ -23,47 +23,13 @@
 
 #include <asm/uaccess.h>
 
-#define MIN_MAP_ADDR	PAGE_SIZE	/* minimum fixed mmap address */
-
-/*
- * memory mapping syscall
- */
-asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
-			  unsigned long prot, unsigned long flags,
-			  unsigned long fd, unsigned long pgoff)
-{
-	struct file *file = NULL;
-	long error = -EINVAL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
-	if (flags & MAP_FIXED && addr < MIN_MAP_ADDR)
-		goto out;
-
-	error = -EBADF;
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
-out:
-	return error;
-}
-
 asmlinkage long old_mmap(unsigned long addr, unsigned long len,
 			 unsigned long prot, unsigned long flags,
 			 unsigned long fd, unsigned long offset)
 {
 	if (offset & ~PAGE_MASK)
 		return -EINVAL;
-	return sys_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+	return sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
 }
 
 struct sel_arg_struct {
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 71b3195..9147391 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -110,37 +110,14 @@
 	return addr;
 }
 
-static unsigned long do_mmap2(unsigned long addr, unsigned long len,
-	unsigned long prot, unsigned long flags, unsigned long fd,
-	unsigned long pgoff)
-{
-	struct file * file = NULL;
-	unsigned long error = -EBADF;
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
-	if (file != NULL)
-		fput(file);
-out:
-	return error;
-}
-
 asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
 	unsigned long prot, unsigned long flags, unsigned long fd,
 	unsigned long pgoff)
 {
 	/* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
 	   we have. */
-	return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12));
+	return sys_mmap_pgoff(addr, len, prot, flags, fd,
+			      pgoff >> (PAGE_SHIFT - 12));
 }
 
 asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
@@ -148,7 +125,8 @@
 		unsigned long offset)
 {
 	if (!(offset & ~PAGE_MASK)) {
-		return do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+		return sys_mmap_pgoff(addr, len, prot, flags, fd,
+					offset >> PAGE_SHIFT);
 	} else {
 		return -EINVAL;
 	}
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index c04832c..3370e62 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -140,7 +140,6 @@
 			unsigned long prot, unsigned long flags,
 			unsigned long fd, unsigned long off, int shift)
 {
-	struct file * file = NULL;
 	unsigned long ret = -EINVAL;
 
 	if (!arch_validate_prot(prot))
@@ -151,20 +150,8 @@
 			goto out;
 		off >>= shift;
 	}
-		
-	ret = -EBADF;
-	if (!(flags & MAP_ANONYMOUS)) {
-		if (!(file = fget(fd)))
-			goto out;
-	}
 
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
-	down_write(&current->mm->mmap_sem);
-	ret = do_mmap_pgoff(file, addr, len, prot, flags, off);
-	up_write(&current->mm->mmap_sem);
-	if (file)
-		fput(file);
+	ret = sys_mmap_pgoff(addr, len, prot, flags, fd, off);
 out:
 	return ret;
 }
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 25c31d6..22c9e55 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -624,38 +624,6 @@
 	u32	offset;
 };
 
-/* common code for old and new mmaps */
-static inline long do_mmap2(
-	unsigned long addr, unsigned long len,
-	unsigned long prot, unsigned long flags,
-	unsigned long fd, unsigned long pgoff)
-{
-	struct file * file = NULL;
-	unsigned long error = -EBADF;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	if (!IS_ERR((void *) error) && error + len >= 0x80000000ULL) {
-		/* Result is out of bounds.  */
-		do_munmap(current->mm, addr, len);
-		error = -ENOMEM;
-	}
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
-out:    
-	return error;
-}
-
-
 asmlinkage unsigned long
 old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
 {
@@ -669,7 +637,8 @@
 	if (a.offset & ~PAGE_MASK)
 		goto out;
 
-	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); 
+	error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
+			       a.offset >> PAGE_SHIFT);
 out:
 	return error;
 }
@@ -682,7 +651,7 @@
 
 	if (copy_from_user(&a, arg, sizeof(a)))
 		goto out;
-	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
+	error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
 out:
 	return error;
 }
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index e9d94f6..86a74c9 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -32,32 +32,6 @@
 #include <asm/uaccess.h>
 #include "entry.h"
 
-/* common code for old and new mmaps */
-static inline long do_mmap2(
-	unsigned long addr, unsigned long len,
-	unsigned long prot, unsigned long flags,
-	unsigned long fd, unsigned long pgoff)
-{
-	long error = -EBADF;
-	struct file * file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
-out:
-	return error;
-}
-
 /*
  * Perform the select(nd, in, out, ex, tv) and mmap() system
  * calls. Linux for S/390 isn't able to handle more than 5
@@ -81,7 +55,7 @@
 
 	if (copy_from_user(&a, arg, sizeof(a)))
 		goto out;
-	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
+	error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
 out:
 	return error;
 }
@@ -98,7 +72,7 @@
 	if (a.offset & ~PAGE_MASK)
 		goto out;
 
-	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
+	error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
 out:
 	return error;
 }
diff --git a/arch/score/kernel/sys_score.c b/arch/score/kernel/sys_score.c
index 0012494..856ed68 100644
--- a/arch/score/kernel/sys_score.c
+++ b/arch/score/kernel/sys_score.c
@@ -36,34 +36,16 @@
 sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
 	  unsigned long flags, unsigned long fd, unsigned long pgoff)
 {
-	int error = -EBADF;
-	struct file *file = NULL;
-
-	if (pgoff & (~PAGE_MASK >> 12))
-		return -EINVAL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			return error;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
-
-	return error;
+	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
 }
 
 asmlinkage long
 sys_mmap(unsigned long addr, unsigned long len, unsigned long prot,
-	unsigned long flags, unsigned long fd, off_t pgoff)
+	unsigned long flags, unsigned long fd, off_t offset)
 {
-	return sys_mmap2(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT);
+	if (unlikely(offset & ~PAGE_MASK))
+		return -EINVAL;
+	return sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
 }
 
 asmlinkage long
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 8aa5d1c..71399cd 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -28,37 +28,13 @@
 #include <asm/cacheflush.h>
 #include <asm/cachectl.h>
 
-static inline long
-do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
-	 unsigned long flags, int fd, unsigned long pgoff)
-{
-	int error = -EBADF;
-	struct file *file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
-out:
-	return error;
-}
-
 asmlinkage int old_mmap(unsigned long addr, unsigned long len,
 	unsigned long prot, unsigned long flags,
 	int fd, unsigned long off)
 {
 	if (off & ~PAGE_MASK)
 		return -EINVAL;
-	return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
+	return sys_mmap_pgoff(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
 }
 
 asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
@@ -74,7 +50,7 @@
 
 	pgoff >>= PAGE_SHIFT - 12;
 
-	return do_mmap2(addr, len, prot, flags, fd, pgoff);
+	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
 }
 
 /*
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index d2984fa..afeb710 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -54,7 +54,8 @@
 		/* We do not accept a shared mapping if it would violate
 		 * cache aliasing constraints.
 		 */
-		if ((flags & MAP_SHARED) && (addr & shm_align_mask))
+		if ((flags & MAP_SHARED) &&
+		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
 			return -EINVAL;
 		return addr;
 	}
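
The hunk above tightens the MAP_FIXED check for shared mappings on sh's
aliasing caches (sparc32 gets the same fix below): it is no longer enough
for the address alone to be colour-aligned; the address and the file
offset must land on the same cache colour, i.e. their difference must be
colour-aligned. A standalone model of the invariant, with a hypothetical
16K colour mask:

	#define PAGE_SHIFT	12
	#define SHM_ALIGN_MASK	0x3fffUL	/* hypothetical colour mask */

	/*
	 * A fixed shared mapping is acceptable only if addr and the file
	 * offset share a cache colour. E.g. addr 0x5000 with pgoff 2:
	 * 0x5000 - 0x2000 = 0x3000, not colour-aligned, so it is rejected.
	 */
	static inline int colour_ok(unsigned long addr, unsigned long pgoff)
	{
		return ((addr - (pgoff << PAGE_SHIFT)) & SHM_ALIGN_MASK) == 0;
	}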
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
index b63e51c..b0576df 100644
--- a/arch/sparc/include/asm/pci_64.h
+++ b/arch/sparc/include/asm/pci_64.h
@@ -16,8 +16,6 @@
 
 #define PCI_IRQ_NONE		0xffffffff
 
-#define PCI_CACHE_LINE_BYTES	64
-
 static inline void pcibios_set_master(struct pci_dev *dev)
 {
 	/* No special bus mastering setup handling */
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index c686486..b85374f 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -1081,3 +1081,10 @@
 	*start = rp->start - offset;
 	*end = rp->end - offset;
 }
+
+static int __init pcibios_init(void)
+{
+	pci_dfl_cache_line_size = 64 >> 2;
+	return 0;
+}
+subsys_initcall(pcibios_init);
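
pci_dfl_cache_line_size, the default this series introduces, is kept in
units of 32-bit words to match the PCI_CACHE_LINE_SIZE config register,
hence the ">> 2": sparc64's 64-byte line becomes 16 dwords. A
hypothetical helper (kernel context assumed, u8 from <linux/types.h>)
making the unit conversion explicit:

	/* Hypothetical: convert bytes to PCI_CACHE_LINE_SIZE register units. */
	static inline u8 pci_cacheline_dwords(unsigned int bytes)
	{
		return bytes >> 2;	/* e.g. 64 bytes -> 16 dwords */
	}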
diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c
index 00abe87..dc0ac19 100644
--- a/arch/sparc/kernel/sys_sparc32.c
+++ b/arch/sparc/kernel/sys_sparc32.c
@@ -564,28 +564,6 @@
 	return do_sys_open(AT_FDCWD, filename, flags, mode);
 }
 
-extern unsigned long do_mremap(unsigned long addr,
-	unsigned long old_len, unsigned long new_len,
-	unsigned long flags, unsigned long new_addr);
-                
-asmlinkage unsigned long sys32_mremap(unsigned long addr,
-	unsigned long old_len, unsigned long new_len,
-	unsigned long flags, u32 __new_addr)
-{
-	unsigned long ret = -EINVAL;
-	unsigned long new_addr = __new_addr;
-
-	if (unlikely(sparc_mmap_check(addr, old_len)))
-		goto out;
-	if (unlikely(sparc_mmap_check(new_addr, new_len)))
-		goto out;
-	down_write(&current->mm->mmap_sem);
-	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
-	up_write(&current->mm->mmap_sem);
-out:
-	return ret;       
-}
-
 long sys32_lookup_dcookie(unsigned long cookie_high,
 			  unsigned long cookie_low,
 			  char __user *buf, size_t len)
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 03035c8..3a82e65 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -45,7 +45,8 @@
 		/* We do not accept a shared mapping if it would violate
 		 * cache aliasing constraints.
 		 */
-		if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
+		if ((flags & MAP_SHARED) &&
+		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
 			return -EINVAL;
 		return addr;
 	}
@@ -79,15 +80,6 @@
 	}
 }
 
-asmlinkage unsigned long sparc_brk(unsigned long brk)
-{
-	if(ARCH_SUN4C) {
-		if ((brk & 0xe0000000) != (current->mm->brk & 0xe0000000))
-			return current->mm->brk;
-	}
-	return sys_brk(brk);
-}
-
 /*
  * sys_pipe() is the normal C calling standard for creating
  * a pipe. It's not the way unix traditionally does this, though.
@@ -234,31 +226,6 @@
 }
 
 /* Linux version of mmap */
-static unsigned long do_mmap2(unsigned long addr, unsigned long len,
-	unsigned long prot, unsigned long flags, unsigned long fd,
-	unsigned long pgoff)
-{
-	struct file * file = NULL;
-	unsigned long retval = -EBADF;
-
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-
-	len = PAGE_ALIGN(len);
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
-	down_write(&current->mm->mmap_sem);
-	retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
-out:
-	return retval;
-}
 
 asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
 	unsigned long prot, unsigned long flags, unsigned long fd,
@@ -266,14 +233,16 @@
 {
 	/* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
 	   we have. */
-	return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12));
+	return sys_mmap_pgoff(addr, len, prot, flags, fd,
+			      pgoff >> (PAGE_SHIFT - 12));
 }
 
 asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
 	unsigned long prot, unsigned long flags, unsigned long fd,
 	unsigned long off)
 {
-	return do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
+	/* no alignment check? */
+	return sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
 }
 
 long sparc_remap_file_pages(unsigned long start, unsigned long size,
@@ -287,27 +256,6 @@
 				    (pgoff >> (PAGE_SHIFT - 12)), flags);
 }
 
-extern unsigned long do_mremap(unsigned long addr,
-	unsigned long old_len, unsigned long new_len,
-	unsigned long flags, unsigned long new_addr);
-                
-asmlinkage unsigned long sparc_mremap(unsigned long addr,
-	unsigned long old_len, unsigned long new_len,
-	unsigned long flags, unsigned long new_addr)
-{
-	unsigned long ret = -EINVAL;
-
-	if (unlikely(sparc_mmap_check(addr, old_len)))
-		goto out;
-	if (unlikely(sparc_mmap_check(new_addr, new_len)))
-		goto out;
-	down_write(&current->mm->mmap_sem);
-	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
-	up_write(&current->mm->mmap_sem);
-out:
-	return ret;       
-}
-
 /* we come to here via sys_nis_syscall so it can setup the regs argument */
 asmlinkage unsigned long
 c_sys_nis_syscall (struct pt_regs *regs)
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index e2d1024..cfa0e19 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -317,10 +317,14 @@
 unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	unsigned long align_goal, addr = -ENOMEM;
+	unsigned long (*get_area)(struct file *, unsigned long,
+				  unsigned long, unsigned long, unsigned long);
+
+	get_area = current->mm->get_unmapped_area;
 
 	if (flags & MAP_FIXED) {
 		/* Ok, don't mess with it. */
-		return get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
+		return get_area(NULL, orig_addr, len, pgoff, flags);
 	}
 	flags &= ~MAP_SHARED;
 
@@ -333,7 +337,7 @@
 		align_goal = (64UL * 1024);
 
 	do {
-		addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
+		addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
 		if (!(addr & ~PAGE_MASK)) {
 			addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
 			break;
@@ -351,7 +355,7 @@
 	 * be obtained.
 	 */
 	if (addr & ~PAGE_MASK)
-		addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
+		addr = get_area(NULL, orig_addr, len, pgoff, flags);
 
 	return addr;
 }
@@ -399,18 +403,6 @@
 	}
 }
 
-SYSCALL_DEFINE1(sparc_brk, unsigned long, brk)
-{
-	/* People could try to be nasty and use ta 0x6d in 32bit programs */
-	if (test_thread_flag(TIF_32BIT) && brk >= STACK_TOP32)
-		return current->mm->brk;
-
-	if (unlikely(straddles_64bit_va_hole(current->mm->brk, brk)))
-		return current->mm->brk;
-
-	return sys_brk(brk);
-}
-                                                                
 /*
  * sys_pipe() is the normal C calling standard for creating
  * a pipe. It's not the way unix traditionally does this, though.
@@ -568,23 +560,13 @@
 		unsigned long, prot, unsigned long, flags, unsigned long, fd,
 		unsigned long, off)
 {
-	struct file * file = NULL;
-	unsigned long retval = -EBADF;
+	unsigned long retval = -EINVAL;
 
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	len = PAGE_ALIGN(len);
-
-	down_write(&current->mm->mmap_sem);
-	retval = do_mmap(file, addr, len, prot, flags, off);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
+	if ((off + PAGE_ALIGN(len)) < off)
+		goto out;
+	if (off & ~PAGE_MASK)
+		goto out;
+	retval = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
 out:
 	return retval;
 }
@@ -614,12 +596,6 @@
 
 	if (test_thread_flag(TIF_32BIT))
 		goto out;
-	if (unlikely(new_len >= VA_EXCLUDE_START))
-		goto out;
-	if (unlikely(sparc_mmap_check(addr, old_len)))
-		goto out;
-	if (unlikely(sparc_mmap_check(new_addr, new_len)))
-		goto out;
 
 	down_write(&current->mm->mmap_sem);
 	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
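
The rewritten sparc64 sys_mmap above rejects an offset whose page-aligned
end would wrap around the address space before shifting it into a page
offset. A small userspace demonstration of the wraparound test (values
hypothetical):

	#include <stdio.h>

	int main(void)
	{
		unsigned long off = 0xfffffffffffff000UL;
		unsigned long len = 0x2000;	/* already page-aligned */

		if (off + len < off)
			printf("off + len wraps: rejected with -EINVAL\n");
		return 0;
	}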
diff --git a/arch/sparc/kernel/systbls.h b/arch/sparc/kernel/systbls.h
index a63c5d2..d2f999a 100644
--- a/arch/sparc/kernel/systbls.h
+++ b/arch/sparc/kernel/systbls.h
@@ -9,7 +9,6 @@
 struct new_utsname;
 
 extern asmlinkage unsigned long sys_getpagesize(void);
-extern asmlinkage unsigned long sparc_brk(unsigned long brk);
 extern asmlinkage long sparc_pipe(struct pt_regs *regs);
 extern asmlinkage long sys_ipc(unsigned int call, int first,
 			       unsigned long second,
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index ceb1530..801fc8e 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -19,7 +19,7 @@
 /*0*/	.long sys_restart_syscall, sys_exit, sys_fork, sys_read, sys_write
 /*5*/	.long sys_open, sys_close, sys_wait4, sys_creat, sys_link
 /*10*/  .long sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod
-/*15*/	.long sys_chmod, sys_lchown16, sparc_brk, sys_nis_syscall, sys_lseek
+/*15*/	.long sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, sys_lseek
 /*20*/	.long sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
 /*25*/	.long sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_pause
 /*30*/	.long sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice
@@ -67,7 +67,7 @@
 /*235*/	.long sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
 /*240*/	.long sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
 /*245*/	.long sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
-/*250*/	.long sparc_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
+/*250*/	.long sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
 /*255*/	.long sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
 /*260*/	.long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
 /*265*/	.long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index cc8e786..e575b46 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -21,7 +21,7 @@
 /*0*/	.word sys_restart_syscall, sys32_exit, sys_fork, sys_read, sys_write
 /*5*/	.word sys32_open, sys_close, sys32_wait4, sys32_creat, sys_link
 /*10*/  .word sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys32_mknod
-/*15*/	.word sys_chmod, sys_lchown16, sys_sparc_brk, sys32_perfctr, sys32_lseek
+/*15*/	.word sys_chmod, sys_lchown16, sys_brk, sys32_perfctr, sys32_lseek
 /*20*/	.word sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
 /*25*/	.word sys32_vmsplice, compat_sys_ptrace, sys_alarm, sys32_sigaltstack, sys_pause
 /*30*/	.word compat_sys_utime, sys_lchown, sys_fchown, sys32_access, sys32_nice
@@ -68,7 +68,7 @@
 	.word compat_sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys32_mlockall
 /*240*/	.word sys_munlockall, sys32_sched_setparam, sys32_sched_getparam, sys32_sched_setscheduler, sys32_sched_getscheduler
 	.word sys_sched_yield, sys32_sched_get_priority_max, sys32_sched_get_priority_min, sys32_sched_rr_get_interval, compat_sys_nanosleep
-/*250*/	.word sys32_mremap, compat_sys_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl
+/*250*/	.word sys_mremap, compat_sys_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl
 	.word sys32_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep
 /*260*/	.word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun
 	.word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
@@ -96,7 +96,7 @@
 /*0*/	.word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write
 /*5*/	.word sys_open, sys_close, sys_wait4, sys_creat, sys_link
 /*10*/  .word sys_unlink, sys_nis_syscall, sys_chdir, sys_chown, sys_mknod
-/*15*/	.word sys_chmod, sys_lchown, sys_sparc_brk, sys_perfctr, sys_lseek
+/*15*/	.word sys_chmod, sys_lchown, sys_brk, sys_perfctr, sys_lseek
 /*20*/	.word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid
 /*25*/	.word sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall
 /*30*/	.word sys_utime, sys_nis_syscall, sys_nis_syscall, sys_access, sys_nice
diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c
index a4625c7..cccab85 100644
--- a/arch/um/kernel/syscall.c
+++ b/arch/um/kernel/syscall.c
@@ -8,6 +8,7 @@
 #include "linux/mm.h"
 #include "linux/sched.h"
 #include "linux/utsname.h"
+#include "linux/syscalls.h"
 #include "asm/current.h"
 #include "asm/mman.h"
 #include "asm/uaccess.h"
@@ -37,31 +38,6 @@
 	return ret;
 }
 
-/* common code for old and new mmaps */
-long sys_mmap2(unsigned long addr, unsigned long len,
-	       unsigned long prot, unsigned long flags,
-	       unsigned long fd, unsigned long pgoff)
-{
-	long error = -EBADF;
-	struct file * file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
- out:
-	return error;
-}
-
 long old_mmap(unsigned long addr, unsigned long len,
 	      unsigned long prot, unsigned long flags,
 	      unsigned long fd, unsigned long offset)
@@ -70,7 +46,7 @@
 	if (offset & ~PAGE_MASK)
 		goto out;
 
-	err = sys_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+	err = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
  out:
 	return err;
 }
diff --git a/arch/um/sys-i386/shared/sysdep/syscalls.h b/arch/um/sys-i386/shared/sysdep/syscalls.h
index 9056981..e778767 100644
--- a/arch/um/sys-i386/shared/sysdep/syscalls.h
+++ b/arch/um/sys-i386/shared/sysdep/syscalls.h
@@ -20,7 +20,3 @@
 #define EXECUTE_SYSCALL(syscall, regs) \
 	((long (*)(struct syscall_args)) \
 	 (*sys_call_table[syscall]))(SYSCALL_ARGS(&regs->regs))
-
-extern long sys_mmap2(unsigned long addr, unsigned long len,
-		      unsigned long prot, unsigned long flags,
-		      unsigned long fd, unsigned long pgoff);
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 4eefdca..53147ad 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -696,7 +696,7 @@
 	.quad quiet_ni_syscall		/* streams2 */
 	.quad stub32_vfork            /* 190 */
 	.quad compat_sys_getrlimit
-	.quad sys32_mmap2
+	.quad sys_mmap_pgoff
 	.quad sys32_truncate64
 	.quad sys32_ftruncate64
 	.quad sys32_stat64		/* 195 */
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index df82c0e..422572c 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -155,9 +155,6 @@
 asmlinkage long sys32_mmap(struct mmap_arg_struct __user *arg)
 {
 	struct mmap_arg_struct a;
-	struct file *file = NULL;
-	unsigned long retval;
-	struct mm_struct *mm ;
 
 	if (copy_from_user(&a, arg, sizeof(a)))
 		return -EFAULT;
@@ -165,22 +162,8 @@
 	if (a.offset & ~PAGE_MASK)
 		return -EINVAL;
 
-	if (!(a.flags & MAP_ANONYMOUS)) {
-		file = fget(a.fd);
-		if (!file)
-			return -EBADF;
-	}
-
-	mm = current->mm;
-	down_write(&mm->mmap_sem);
-	retval = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags,
+	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
 			       a.offset>>PAGE_SHIFT);
-	if (file)
-		fput(file);
-
-	up_write(&mm->mmap_sem);
-
-	return retval;
 }
 
 asmlinkage long sys32_mprotect(unsigned long start, size_t len,
@@ -483,30 +466,6 @@
 	return ret;
 }
 
-asmlinkage long sys32_mmap2(unsigned long addr, unsigned long len,
-			    unsigned long prot, unsigned long flags,
-			    unsigned long fd, unsigned long pgoff)
-{
-	struct mm_struct *mm = current->mm;
-	unsigned long error;
-	struct file *file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			return -EBADF;
-	}
-
-	down_write(&mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&mm->mmap_sem);
-
-	if (file)
-		fput(file);
-	return error;
-}
-
 asmlinkage long sys32_olduname(struct oldold_utsname __user *name)
 {
 	char *arch = "x86_64";
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index b399988..b4bf9a9 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -118,11 +118,27 @@
 
 /* pci-mmconfig.c */
 
+/* "PCI MMCONFIG %04x [bus %02x-%02x]" */
+#define PCI_MMCFG_RESOURCE_NAME_LEN (22 + 4 + 2 + 2)
+
+struct pci_mmcfg_region {
+	struct list_head list;
+	struct resource res;
+	u64 address;
+	char __iomem *virt;
+	u16 segment;
+	u8 start_bus;
+	u8 end_bus;
+	char name[PCI_MMCFG_RESOURCE_NAME_LEN];
+};
+
 extern int __init pci_mmcfg_arch_init(void);
 extern void __init pci_mmcfg_arch_free(void);
+extern struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus);
 
-extern struct acpi_mcfg_allocation *pci_mmcfg_config;
-extern int pci_mmcfg_config_num;
+extern struct list_head pci_mmcfg_list;
+
+#define PCI_MMCFG_BUS_OFFSET(bus)      ((bus) << 20)
 
 /*
  * AMD Fam10h CPUs are buggy, and cannot access MMIO config space
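
PCI_MMCFG_BUS_OFFSET encodes the ECAM layout: each bus occupies 1 MiB of
MMCONFIG space (32 devices x 8 functions x 4 KiB of config space each).
A sketch of the full offset computation, assuming the standard ECAM
encoding:

	/* Sketch: byte offset of a config register inside an ECAM window. */
	static inline unsigned long long
	mmcfg_offset(unsigned bus, unsigned dev, unsigned fn, unsigned reg)
	{
		return ((unsigned long long)bus << 20) |
		       (dev << 15) | (fn << 12) | reg;
	}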
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index 9af9dec..4a5a089e 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -57,9 +57,6 @@
 asmlinkage long sys32_personality(unsigned long);
 asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32);
 
-asmlinkage long sys32_mmap2(unsigned long, unsigned long, unsigned long,
-			    unsigned long, unsigned long, unsigned long);
-
 struct oldold_utsname;
 struct old_utsname;
 asmlinkage long sys32_olduname(struct oldold_utsname __user *);
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 372b76e..1bb6e39 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -55,8 +55,6 @@
 struct oldold_utsname;
 struct old_utsname;
 
-asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
-			  unsigned long, unsigned long, unsigned long);
 asmlinkage int old_mmap(struct mmap_arg_struct __user *);
 asmlinkage int old_select(struct sel_arg_struct __user *);
 asmlinkage int sys_ipc(uint, int, int, int, void __user *, long);
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index d5b7e90..396ff4c 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -37,31 +37,4 @@
 extern struct shared_info *HYPERVISOR_shared_info;
 extern struct start_info *xen_start_info;
 
-enum xen_domain_type {
-	XEN_NATIVE,		/* running on bare hardware    */
-	XEN_PV_DOMAIN,		/* running in a PV domain      */
-	XEN_HVM_DOMAIN,		/* running in a Xen hvm domain */
-};
-
-#ifdef CONFIG_XEN
-extern enum xen_domain_type xen_domain_type;
-#else
-#define xen_domain_type		XEN_NATIVE
-#endif
-
-#define xen_domain()		(xen_domain_type != XEN_NATIVE)
-#define xen_pv_domain()		(xen_domain() &&			\
-				 xen_domain_type == XEN_PV_DOMAIN)
-#define xen_hvm_domain()	(xen_domain() &&			\
-				 xen_domain_type == XEN_HVM_DOMAIN)
-
-#ifdef CONFIG_XEN_DOM0
-#include <xen/interface/xen.h>
-
-#define xen_initial_domain()	(xen_pv_domain() && \
-				 xen_start_info->flags & SIF_INITDOMAIN)
-#else  /* !CONFIG_XEN_DOM0 */
-#define xen_initial_domain()	(0)
-#endif	/* CONFIG_XEN_DOM0 */
-
 #endif /* _ASM_X86_XEN_HYPERVISOR_H */
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 7ffc399..9c4a6f7 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -1336,6 +1336,9 @@
 		iommu_detected = 1;
 		amd_iommu_detected = 1;
 		x86_init.iommu.iommu_init = amd_iommu_init;
+
+		/* Make sure ACS will be enabled */
+		pci_request_acs();
 	}
 }
 
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 20a5b36..dd74fe7 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -86,9 +86,15 @@
 	gdb_regs[GDB_DS]	= regs->ds;
 	gdb_regs[GDB_ES]	= regs->es;
 	gdb_regs[GDB_CS]	= regs->cs;
-	gdb_regs[GDB_SS]	= __KERNEL_DS;
 	gdb_regs[GDB_FS]	= 0xFFFF;
 	gdb_regs[GDB_GS]	= 0xFFFF;
+	if (user_mode_vm(regs)) {
+		gdb_regs[GDB_SS] = regs->ss;
+		gdb_regs[GDB_SP] = regs->sp;
+	} else {
+		gdb_regs[GDB_SS] = __KERNEL_DS;
+		gdb_regs[GDB_SP] = kernel_stack_pointer(regs);
+	}
 #else
 	gdb_regs[GDB_R8]	= regs->r8;
 	gdb_regs[GDB_R9]	= regs->r9;
@@ -101,8 +107,8 @@
 	gdb_regs32[GDB_PS]	= regs->flags;
 	gdb_regs32[GDB_CS]	= regs->cs;
 	gdb_regs32[GDB_SS]	= regs->ss;
-#endif
 	gdb_regs[GDB_SP]	= kernel_stack_pointer(regs);
+#endif
 }
 
 /**
@@ -220,8 +226,7 @@
 			dr7 |= ((breakinfo[breakno].len << 2) |
 				 breakinfo[breakno].type) <<
 			       ((breakno << 2) + 16);
-			if (breakno >= 0 && breakno <= 3)
-				set_debugreg(breakinfo[breakno].addr, breakno);
+			set_debugreg(breakinfo[breakno].addr, breakno);
 
 		} else {
 			if ((dr7 & breakbit) && !breakinfo[breakno].enabled) {
@@ -395,7 +400,6 @@
 		/* set the trace bit if we're stepping */
 		if (remcomInBuffer[0] == 's') {
 			linux_regs->flags |= X86_EFLAGS_TF;
-			kgdb_single_step = 1;
 			atomic_set(&kgdb_cpu_doing_single_step,
 				   raw_smp_processor_id());
 		}
diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
index 1884a8d..dee1ff7 100644
--- a/arch/x86/kernel/sys_i386_32.c
+++ b/arch/x86/kernel/sys_i386_32.c
@@ -24,31 +24,6 @@
 
 #include <asm/syscalls.h>
 
-asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
-			  unsigned long prot, unsigned long flags,
-			  unsigned long fd, unsigned long pgoff)
-{
-	int error = -EBADF;
-	struct file *file = NULL;
-	struct mm_struct *mm = current->mm;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-
-	down_write(&mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&mm->mmap_sem);
-
-	if (file)
-		fput(file);
-out:
-	return error;
-}
-
 /*
  * Perform the select(nd, in, out, ex, tv) and mmap() system
  * calls. Linux/i386 didn't use to be able to handle more than
@@ -77,7 +52,7 @@
 	if (a.offset & ~PAGE_MASK)
 		goto out;
 
-	err = sys_mmap2(a.addr, a.len, a.prot, a.flags,
+	err = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags,
 			a.fd, a.offset >> PAGE_SHIFT);
 out:
 	return err;
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 45e00eb..8aa2057 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -23,26 +23,11 @@
 		unsigned long, fd, unsigned long, off)
 {
 	long error;
-	struct file *file;
-
 	error = -EINVAL;
 	if (off & ~PAGE_MASK)
 		goto out;
 
-	error = -EBADF;
-	file = NULL;
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, off >> PAGE_SHIFT);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
+	error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
 out:
 	return error;
 }
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index 70c2125..15228b5 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -191,7 +191,7 @@
 	.long sys_ni_syscall	/* reserved for streams2 */
 	.long ptregs_vfork	/* 190 */
 	.long sys_getrlimit
-	.long sys_mmap2
+	.long sys_mmap_pgoff
 	.long sys_truncate64
 	.long sys_ftruncate64
 	.long sys_stat64	/* 195 */
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
index d49202e..564b008 100644
--- a/arch/x86/pci/Makefile
+++ b/arch/x86/pci/Makefile
@@ -15,3 +15,8 @@
 
 obj-y				+= common.o early.o
 obj-y				+= amd_bus.o
+obj-$(CONFIG_X86_64)		+= bus_numa.o intel_bus.o
+
+ifeq ($(CONFIG_PCI_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 1014eb4..959e548 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -7,6 +7,7 @@
 #include <asm/pci_x86.h>
 
 struct pci_root_info {
+	struct acpi_device *bridge;
 	char *name;
 	unsigned int res_num;
 	struct resource *res;
@@ -58,6 +59,30 @@
 	return false;
 }
 
+static void
+align_resource(struct acpi_device *bridge, struct resource *res)
+{
+	int align = (res->flags & IORESOURCE_MEM) ? 16 : 4;
+
+	/*
+	 * Host bridge windows are not BARs, but the decoders on the PCI side
+	 * that claim this address space have starting alignment and length
+	 * constraints, so fix any obvious BIOS goofs.
+	 */
+	if (!IS_ALIGNED(res->start, align)) {
+		dev_printk(KERN_DEBUG, &bridge->dev,
+			   "host bridge window %pR invalid; "
+			   "aligning start to %d-byte boundary\n", res, align);
+		res->start &= ~(align - 1);
+	}
+	if (!IS_ALIGNED(res->end + 1, align)) {
+		dev_printk(KERN_DEBUG, &bridge->dev,
+			   "host bridge window %pR invalid; "
+			   "aligning end to %d-byte boundary\n", res, align);
+		res->end = ALIGN(res->end, align) - 1;
+	}
+}
+
 static acpi_status
 setup_resource(struct acpi_resource *acpi_res, void *data)
 {
@@ -91,11 +116,12 @@
 	start = addr.minimum + addr.translation_offset;
 	end = start + addr.address_length - 1;
 	if (info->res_num >= max_root_bus_resources) {
-		printk(KERN_WARNING "PCI: Failed to allocate 0x%lx-0x%lx "
-			"from %s for %s due to _CRS returning more than "
-			"%d resource descriptors\n", (unsigned long) start,
-			(unsigned long) end, root->name, info->name,
-			max_root_bus_resources);
+		if (pci_probe & PCI_USE__CRS)
+			printk(KERN_WARNING "PCI: Failed to allocate "
+			       "0x%lx-0x%lx from %s for %s due to _CRS "
+			       "returning more than %d resource descriptors\n",
+			       (unsigned long) start, (unsigned long) end,
+			       root->name, info->name, max_root_bus_resources);
 		return AE_OK;
 	}
 
@@ -105,14 +131,28 @@
 	res->start = start;
 	res->end = end;
 	res->child = NULL;
+	align_resource(info->bridge, res);
+
+	if (!(pci_probe & PCI_USE__CRS)) {
+		dev_printk(KERN_DEBUG, &info->bridge->dev,
+			   "host bridge window %pR (ignored)\n", res);
+		return AE_OK;
+	}
 
 	if (insert_resource(root, res)) {
-		printk(KERN_ERR "PCI: Failed to allocate 0x%lx-0x%lx "
-			"from %s for %s\n", (unsigned long) res->start,
-			(unsigned long) res->end, root->name, info->name);
+		dev_err(&info->bridge->dev,
+			"can't allocate host bridge window %pR\n", res);
 	} else {
 		info->bus->resource[info->res_num] = res;
 		info->res_num++;
+		if (addr.translation_offset)
+			dev_info(&info->bridge->dev, "host bridge window %pR "
+				 "(PCI address [%#llx-%#llx])\n",
+				 res, res->start - addr.translation_offset,
+				 res->end - addr.translation_offset);
+		else
+			dev_info(&info->bridge->dev,
+				 "host bridge window %pR\n", res);
 	}
 	return AE_OK;
 }
@@ -124,6 +164,12 @@
 	struct pci_root_info info;
 	size_t size;
 
+	if (!(pci_probe & PCI_USE__CRS))
+		dev_info(&device->dev,
+			 "ignoring host bridge windows from ACPI; "
+			 "boot with \"pci=use_crs\" to use them\n");
+
+	info.bridge = device;
 	info.bus = bus;
 	info.res_num = 0;
 	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
@@ -163,8 +209,9 @@
 #endif
 
 	if (domain && !pci_domains_supported) {
-		printk(KERN_WARNING "PCI: Multiple domains not supported "
-		       "(dom %d, bus %d)\n", domain, busnum);
+		printk(KERN_WARNING "pci_bus %04x:%02x: "
+		       "ignored (multiple domains not supported)\n",
+		       domain, busnum);
 		return NULL;
 	}
 
@@ -188,7 +235,8 @@
 	 */
 	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
 	if (!sd) {
-		printk(KERN_ERR "PCI: OOM, not probing PCI bus %02x\n", busnum);
+		printk(KERN_WARNING "pci_bus %04x:%02x: "
+		       "ignored (out of memory)\n", domain, busnum);
 		return NULL;
 	}
 
@@ -209,9 +257,7 @@
 	} else {
 		bus = pci_create_bus(NULL, busnum, &pci_root_ops, sd);
 		if (bus) {
-			if (pci_probe & PCI_USE__CRS)
-				get_current_resources(device, busnum, domain,
-							bus);
+			get_current_resources(device, busnum, domain, bus);
 			bus->subordinate = pci_scan_child_bus(bus);
 		}
 	}
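
For reference, align_resource() above rounds a misaligned host bridge
window onto the decoder's granularity: with the 16-byte memory alignment,
a BIOS-reported window [0x1001, 0x2000] becomes [0x1000, 0x1fff]. A tiny
standalone model of the arithmetic:

	#include <stdio.h>

	#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
	#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned long start = 0x1001, end = 0x2000, align = 16;

		if (!IS_ALIGNED(start, align))
			start &= ~(align - 1);		/* -> 0x1000 */
		if (!IS_ALIGNED(end + 1, align))
			end = ALIGN(end, align) - 1;	/* -> 0x1fff */
		printf("[%#lx, %#lx]\n", start, end);
		return 0;
	}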
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index 572ee97..95ecbd4 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -6,10 +6,10 @@
 
 #ifdef CONFIG_X86_64
 #include <asm/pci-direct.h>
-#include <asm/mpspec.h>
-#include <linux/cpumask.h>
 #endif
 
+#include "bus_numa.h"
+
 /*
  * This discovers the pcibus <-> node mapping on AMD K8.
  * also get peer root bus resource for io,mmio
@@ -17,67 +17,6 @@
 
 #ifdef CONFIG_X86_64
 
-/*
- * sub bus (transparent) will use entres from 3 to store extra from root,
- * so need to make sure have enought slot there, increase PCI_BUS_NUM_RESOURCES?
- */
-#define RES_NUM 16
-struct pci_root_info {
-	char name[12];
-	unsigned int res_num;
-	struct resource res[RES_NUM];
-	int bus_min;
-	int bus_max;
-	int node;
-	int link;
-};
-
-/* 4 at this time, it may become to 32 */
-#define PCI_ROOT_NR 4
-static int pci_root_num;
-static struct pci_root_info pci_root_info[PCI_ROOT_NR];
-
-void x86_pci_root_bus_res_quirks(struct pci_bus *b)
-{
-	int i;
-	int j;
-	struct pci_root_info *info;
-
-	/* don't go for it if _CRS is used already */
-	if (b->resource[0] != &ioport_resource ||
-	    b->resource[1] != &iomem_resource)
-		return;
-
-	/* if only one root bus, don't need to anything */
-	if (pci_root_num < 2)
-		return;
-
-	for (i = 0; i < pci_root_num; i++) {
-		if (pci_root_info[i].bus_min == b->number)
-			break;
-	}
-
-	if (i == pci_root_num)
-		return;
-
-	printk(KERN_DEBUG "PCI: peer root bus %02x res updated from pci conf\n",
-			b->number);
-
-	info = &pci_root_info[i];
-	for (j = 0; j < info->res_num; j++) {
-		struct resource *res;
-		struct resource *root;
-
-		res = &info->res[j];
-		b->resource[j] = res;
-		if (res->flags & IORESOURCE_IO)
-			root = &ioport_resource;
-		else
-			root = &iomem_resource;
-		insert_resource(root, res);
-	}
-}
-
 #define RANGE_NUM 16
 
 struct res_range {
@@ -130,52 +69,6 @@
 	}
 }
 
-static void __init update_res(struct pci_root_info *info, size_t start,
-			      size_t end, unsigned long flags, int merge)
-{
-	int i;
-	struct resource *res;
-
-	if (!merge)
-		goto addit;
-
-	/* try to merge it with old one */
-	for (i = 0; i < info->res_num; i++) {
-		size_t final_start, final_end;
-		size_t common_start, common_end;
-
-		res = &info->res[i];
-		if (res->flags != flags)
-			continue;
-
-		common_start = max((size_t)res->start, start);
-		common_end = min((size_t)res->end, end);
-		if (common_start > common_end + 1)
-			continue;
-
-		final_start = min((size_t)res->start, start);
-		final_end = max((size_t)res->end, end);
-
-		res->start = final_start;
-		res->end = final_end;
-		return;
-	}
-
-addit:
-
-	/* need to add that */
-	if (info->res_num >= RES_NUM)
-		return;
-
-	res = &info->res[info->res_num];
-	res->name = info->name;
-	res->flags = flags;
-	res->start = start;
-	res->end = end;
-	res->child = NULL;
-	info->res_num++;
-}
-
 struct pci_hostbridge_probe {
 	u32 bus;
 	u32 slot;
@@ -230,7 +123,6 @@
 	int j;
 	unsigned bus;
 	unsigned slot;
-	int found;
 	int node;
 	int link;
 	int def_node;
@@ -247,7 +139,7 @@
 	if (!early_pci_allowed())
 		return -1;
 
-	found = 0;
+	found_all_numa_early = 0;
 	for (i = 0; i < ARRAY_SIZE(pci_probes); i++) {
 		u32 id;
 		u16 device;
@@ -261,12 +153,12 @@
 		device = (id>>16) & 0xffff;
 		if (pci_probes[i].vendor == vendor &&
 		    pci_probes[i].device == device) {
-			found = 1;
+			found_all_numa_early = 1;
 			break;
 		}
 	}
 
-	if (!found)
+	if (!found_all_numa_early)
 		return 0;
 
 	pci_root_num = 0;
@@ -488,7 +380,7 @@
 		info = &pci_root_info[i];
 		res_num = info->res_num;
 		busnum = info->bus_min;
-		printk(KERN_DEBUG "bus: [%02x,%02x] on node %x link %x\n",
+		printk(KERN_DEBUG "bus: [%02x, %02x] on node %x link %x\n",
 		       info->bus_min, info->bus_max, info->node, info->link);
 		for (j = 0; j < res_num; j++) {
 			res = &info->res[j];
diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c
new file mode 100644
index 0000000..145df00
--- /dev/null
+++ b/arch/x86/pci/bus_numa.c
@@ -0,0 +1,101 @@
+#include <linux/init.h>
+#include <linux/pci.h>
+
+#include "bus_numa.h"
+
+int pci_root_num;
+struct pci_root_info pci_root_info[PCI_ROOT_NR];
+int found_all_numa_early;
+
+void x86_pci_root_bus_res_quirks(struct pci_bus *b)
+{
+	int i;
+	int j;
+	struct pci_root_info *info;
+
+	/* don't go for it if _CRS is used already */
+	if (b->resource[0] != &ioport_resource ||
+	    b->resource[1] != &iomem_resource)
+		return;
+
+	if (!pci_root_num)
+		return;
+
+	/* for AMD: if there is only one root bus, there is nothing to do */
+	if (pci_root_num < 2 && found_all_numa_early)
+		return;
+
+	for (i = 0; i < pci_root_num; i++) {
+		if (pci_root_info[i].bus_min == b->number)
+			break;
+	}
+
+	if (i == pci_root_num)
+		return;
+
+	printk(KERN_DEBUG "PCI: peer root bus %02x res updated from pci conf\n",
+			b->number);
+
+	info = &pci_root_info[i];
+	for (j = 0; j < info->res_num; j++) {
+		struct resource *res;
+		struct resource *root;
+
+		res = &info->res[j];
+		b->resource[j] = res;
+		if (res->flags & IORESOURCE_IO)
+			root = &ioport_resource;
+		else
+			root = &iomem_resource;
+		insert_resource(root, res);
+	}
+}
+
+void __init update_res(struct pci_root_info *info, size_t start,
+			      size_t end, unsigned long flags, int merge)
+{
+	int i;
+	struct resource *res;
+
+	if (start > end)
+		return;
+
+	if (!merge)
+		goto addit;
+
+	/* try to merge it with an existing entry */
+	for (i = 0; i < info->res_num; i++) {
+		size_t final_start, final_end;
+		size_t common_start, common_end;
+
+		res = &info->res[i];
+		if (res->flags != flags)
+			continue;
+
+		common_start = max((size_t)res->start, start);
+		common_end = min((size_t)res->end, end);
+		if (common_start > common_end + 1)
+			continue;
+
+		final_start = min((size_t)res->start, start);
+		final_end = max((size_t)res->end, end);
+
+		res->start = final_start;
+		res->end = final_end;
+		return;
+	}
+
+addit:
+
+	/* nothing to merge with; add a new entry */
+	if (info->res_num >= RES_NUM)
+		return;
+
+	res = &info->res[info->res_num];
+	res->name = info->name;
+	res->flags = flags;
+	res->start = start;
+	res->end = end;
+	res->child = NULL;
+	info->res_num++;
+}
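
update_res() merges a new window into an existing entry of the same type
whenever the two ranges overlap or are adjacent (the
"common_start > common_end + 1" test is the disjointness check). A
standalone model of that merge step:

	#include <stdio.h>

	int main(void)
	{
		unsigned long rs = 0x9000, re = 0xa7ff;	/* existing entry */
		unsigned long ns = 0xa000, ne = 0xbfff;	/* new range */
		unsigned long cs = ns > rs ? ns : rs;	/* common_start */
		unsigned long ce = ne < re ? ne : re;	/* common_end */

		if (!(cs > ce + 1)) {	/* overlapping or adjacent: merge */
			rs = rs < ns ? rs : ns;
			re = re > ne ? re : ne;
		}
		printf("[%#lx, %#lx]\n", rs, re);	/* [0x9000, 0xbfff] */
		return 0;
	}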
diff --git a/arch/x86/pci/bus_numa.h b/arch/x86/pci/bus_numa.h
new file mode 100644
index 0000000..adbc23f
--- /dev/null
+++ b/arch/x86/pci/bus_numa.h
@@ -0,0 +1,27 @@
+#ifdef CONFIG_X86_64
+
+/*
+ * A subordinate (transparent) bus will use entries from 3 onwards to
+ * store extras from the root, so we need to make sure there are enough
+ * slots there. Should we increase PCI_BUS_NUM_RESOURCES?
+ */
+#define RES_NUM 16
+struct pci_root_info {
+	char name[12];
+	unsigned int res_num;
+	struct resource res[RES_NUM];
+	int bus_min;
+	int bus_max;
+	int node;
+	int link;
+};
+
+/* 4 at this time; it may grow to 32 */
+#define PCI_ROOT_NR 4
+extern int pci_root_num;
+extern struct pci_root_info pci_root_info[PCI_ROOT_NR];
+extern int found_all_numa_early;
+
+extern void update_res(struct pci_root_info *info, size_t start,
+			      size_t end, unsigned long flags, int merge);
+#endif
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 1331fcf..d2552c6 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -410,8 +410,6 @@
 	return bus;
 }
 
-extern u8 pci_cache_line_size;
-
 int __init pcibios_init(void)
 {
 	struct cpuinfo_x86 *c = &boot_cpu_data;
@@ -422,15 +420,19 @@
 	}
 
 	/*
-	 * Assume PCI cacheline size of 32 bytes for all x86s except K7/K8
-	 * and P4. It's also good for 386/486s (which actually have 16)
+	 * Set PCI cacheline size to that of the CPU if the CPU has reported it.
+	 * (For older CPUs that don't support cpuid, we set it to 32 bytes.)
+	 * It's also good for 386/486s (which actually have 16)
 	 * as quite a few PCI devices do not support smaller values.
 	 */
-	pci_cache_line_size = 32 >> 2;
-	if (c->x86 >= 6 && c->x86_vendor == X86_VENDOR_AMD)
-		pci_cache_line_size = 64 >> 2;	/* K7 & K8 */
-	else if (c->x86 > 6 && c->x86_vendor == X86_VENDOR_INTEL)
-		pci_cache_line_size = 128 >> 2;	/* P4 */
+	if (c->x86_clflush_size > 0) {
+		pci_dfl_cache_line_size = c->x86_clflush_size >> 2;
+		printk(KERN_DEBUG "PCI: pci_cache_line_size set to %d bytes\n",
+			pci_dfl_cache_line_size << 2);
+	} else {
+		pci_dfl_cache_line_size = 32 >> 2;
+		printk(KERN_DEBUG "PCI: Unknown cacheline size. Setting to 32 bytes\n");
+	}
 
 	pcibios_resource_survey();
 
diff --git a/arch/x86/pci/early.c b/arch/x86/pci/early.c
index aaf26ae..d1067d5 100644
--- a/arch/x86/pci/early.c
+++ b/arch/x86/pci/early.c
@@ -12,8 +12,6 @@
 	u32 v;
 	outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
 	v = inl(0xcfc);
-	if (v != 0xffffffff)
-		pr_debug("%x reading 4 from %x: %x\n", slot, offset, v);
 	return v;
 }
 
@@ -22,7 +20,6 @@
 	u8 v;
 	outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
 	v = inb(0xcfc + (offset&3));
-	pr_debug("%x reading 1 from %x: %x\n", slot, offset, v);
 	return v;
 }
 
@@ -31,28 +28,24 @@
 	u16 v;
 	outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
 	v = inw(0xcfc + (offset&2));
-	pr_debug("%x reading 2 from %x: %x\n", slot, offset, v);
 	return v;
 }
 
 void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset,
 				    u32 val)
 {
-	pr_debug("%x writing to %x: %x\n", slot, offset, val);
 	outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
 	outl(val, 0xcfc);
 }
 
 void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val)
 {
-	pr_debug("%x writing to %x: %x\n", slot, offset, val);
 	outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
 	outb(val, 0xcfc + (offset&3));
 }
 
 void write_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset, u16 val)
 {
-	pr_debug("%x writing to %x: %x\n", slot, offset, val);
 	outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
 	outw(val, 0xcfc + (offset&2));
 }
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index b22d13b..5dc9e8c 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -129,7 +129,9 @@
 					continue;
 				if (!r->start ||
 				    pci_claim_resource(dev, idx) < 0) {
-					dev_info(&dev->dev, "BAR %d: can't allocate resource\n", idx);
+					dev_info(&dev->dev,
+						 "can't reserve window %pR\n",
+						 r);
 					/*
 					 * Something is wrong with the region.
 					 * Invalidate the resource to prevent
@@ -144,16 +146,29 @@
 	}
 }
 
+struct pci_check_idx_range {
+	int start;
+	int end;
+};
+
 static void __init pcibios_allocate_resources(int pass)
 {
 	struct pci_dev *dev = NULL;
-	int idx, disabled;
+	int idx, disabled, i;
 	u16 command;
 	struct resource *r;
 
+	struct pci_check_idx_range idx_range[] = {
+		{ PCI_STD_RESOURCES, PCI_STD_RESOURCE_END },
+#ifdef CONFIG_PCI_IOV
+		{ PCI_IOV_RESOURCES, PCI_IOV_RESOURCE_END },
+#endif
+	};
+
 	for_each_pci_dev(dev) {
 		pci_read_config_word(dev, PCI_COMMAND, &command);
-		for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
+		for (i = 0; i < ARRAY_SIZE(idx_range); i++)
+		for (idx = idx_range[i].start; idx <= idx_range[i].end; idx++) {
 			r = &dev->resource[idx];
 			if (r->parent)		/* Already allocated */
 				continue;
@@ -164,12 +179,12 @@
 			else
 				disabled = !(command & PCI_COMMAND_MEMORY);
 			if (pass == disabled) {
-				dev_dbg(&dev->dev, "resource %#08llx-%#08llx (f=%lx, d=%d, p=%d)\n",
-					(unsigned long long) r->start,
-					(unsigned long long) r->end,
-					r->flags, disabled, pass);
+				dev_dbg(&dev->dev,
+					"BAR %d: reserving %pr (d=%d, p=%d)\n",
+					idx, r, disabled, pass);
 				if (pci_claim_resource(dev, idx) < 0) {
-					dev_info(&dev->dev, "BAR %d: can't allocate resource\n", idx);
+					dev_info(&dev->dev,
+						 "can't reserve %pR\n", r);
 					/* We'll assign a new address later */
 					r->end -= r->start;
 					r->start = 0;
@@ -182,7 +197,7 @@
 				/* Turn the ROM off, leave the resource region,
 				 * but keep it unregistered. */
 				u32 reg;
-				dev_dbg(&dev->dev, "disabling ROM\n");
+				dev_dbg(&dev->dev, "disabling ROM %pR\n", r);
 				r->flags &= ~IORESOURCE_ROM_ENABLE;
 				pci_read_config_dword(dev,
 						dev->rom_base_reg, &reg);
@@ -282,6 +297,15 @@
 		return -EINVAL;
 
 	prot = pgprot_val(vma->vm_page_prot);
+
+	/*
+	 * Return an error if PAT is not enabled but write_combine is
+	 * requested. The caller can follow up with a UC-minus request and
+	 * add a WC MTRR if there is a free MTRR slot.
+	 */
+	if (!pat_enabled && write_combine)
+		return -EINVAL;
+
 	if (pat_enabled && write_combine)
 		prot |= _PAGE_CACHE_WC;
 	else if (pat_enabled || boot_cpu_data.x86 > 3)
diff --git a/arch/x86/pci/intel_bus.c b/arch/x86/pci/intel_bus.c
new file mode 100644
index 0000000..b7a55dc
--- /dev/null
+++ b/arch/x86/pci/intel_bus.c
@@ -0,0 +1,90 @@
+/*
+ * Read IO ranges from IOH pci conf; must be done after mmconfig is set up.
+ */
+
+#include <linux/delay.h>
+#include <linux/dmi.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <asm/pci_x86.h>
+
+#include "bus_numa.h"
+
+static inline void print_ioh_resources(struct pci_root_info *info)
+{
+	int res_num;
+	int busnum;
+	int i;
+
+	printk(KERN_DEBUG "IOH bus: [%02x, %02x]\n",
+			info->bus_min, info->bus_max);
+	res_num = info->res_num;
+	busnum = info->bus_min;
+	for (i = 0; i < res_num; i++) {
+		struct resource *res;
+
+		res = &info->res[i];
+		printk(KERN_DEBUG "IOH bus: %02x index %x %s: [%llx, %llx]\n",
+			busnum, i,
+			(res->flags & IORESOURCE_IO) ? "io port" :
+							"mmio",
+			res->start, res->end);
+	}
+}
+
+#define IOH_LIO			0x108
+#define IOH_LMMIOL		0x10c
+#define IOH_LMMIOH		0x110
+#define IOH_LMMIOH_BASEU	0x114
+#define IOH_LMMIOH_LIMITU	0x118
+#define IOH_LCFGBUS		0x11c
+
+static void __devinit pci_root_bus_res(struct pci_dev *dev)
+{
+	u16 word;
+	u32 dword;
+	struct pci_root_info *info;
+	u16 io_base, io_end;
+	u32 mmiol_base, mmiol_end;
+	u64 mmioh_base, mmioh_end;
+	int bus_base, bus_end;
+
+	if (pci_root_num >= PCI_ROOT_NR) {
+		printk(KERN_DEBUG "intel_bus.c: PCI_ROOT_NR is too small\n");
+		return;
+	}
+
+	info = &pci_root_info[pci_root_num];
+	pci_root_num++;
+
+	pci_read_config_word(dev, IOH_LCFGBUS, &word);
+	bus_base = (word & 0xff);
+	bus_end = (word & 0xff00) >> 8;
+	sprintf(info->name, "PCI Bus #%02x", bus_base);
+	info->bus_min = bus_base;
+	info->bus_max = bus_end;
+
+	pci_read_config_word(dev, IOH_LIO, &word);
+	io_base = (word & 0xf0) << (12 - 4);
+	io_end = (word & 0xf000) | 0xfff;
+	update_res(info, io_base, io_end, IORESOURCE_IO, 0);
+
+	pci_read_config_dword(dev, IOH_LMMIOL, &dword);
+	mmiol_base = (dword & 0xff00) << (24 - 8);
+	mmiol_end = (dword & 0xff000000) | 0xffffff;
+	update_res(info, mmiol_base, mmiol_end, IORESOURCE_MEM, 0);
+
+	pci_read_config_dword(dev, IOH_LMMIOH, &dword);
+	mmioh_base = ((u64)(dword & 0xfc00)) << (26 - 10);
+	mmioh_end = ((u64)(dword & 0xfc000000) | 0x3ffffff);
+	pci_read_config_dword(dev, IOH_LMMIOH_BASEU, &dword);
+	mmioh_base |= ((u64)(dword & 0x7ffff)) << 32;
+	pci_read_config_dword(dev, IOH_LMMIOH_LIMITU, &dword);
+	mmioh_end |= ((u64)(dword & 0x7ffff)) << 32;
+	update_res(info, mmioh_base, mmioh_end, IORESOURCE_MEM, 0);
+
+	print_ioh_resources(info);
+}
+
+/* intel IOH */
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, pci_root_bus_res);
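
The field decodes in pci_root_bus_res() unpack base/limit nibbles of the
IOH's I/O window from a single 16-bit register: bits 7:4 hold base[15:12]
and bits 15:12 hold limit[15:12]. A worked example with a hypothetical
register value:

	#include <stdio.h>

	int main(void)
	{
		unsigned short word = 0xf010;	/* hypothetical IOH_LIO value */
		unsigned int io_base = (word & 0x00f0) << (12 - 4);
		unsigned int io_end  = (word & 0xf000) | 0xfff;

		printf("io: [%#x, %#x]\n", io_base, io_end);
		/* prints io: [0x1000, 0xffff] */
		return 0;
	}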
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 602c172..b19d1e5 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -15,48 +15,98 @@
 #include <linux/acpi.h>
 #include <linux/sfi_acpi.h>
 #include <linux/bitmap.h>
-#include <linux/sort.h>
+#include <linux/dmi.h>
 #include <asm/e820.h>
 #include <asm/pci_x86.h>
 #include <asm/acpi.h>
 
 #define PREFIX "PCI: "
 
-/* aperture is up to 256MB but BIOS may reserve less */
-#define MMCONFIG_APER_MIN	(2 * 1024*1024)
-#define MMCONFIG_APER_MAX	(256 * 1024*1024)
-
 /* Indicate if the mmcfg resources have been placed into the resource table. */
 static int __initdata pci_mmcfg_resources_inserted;
 
-static __init int extend_mmcfg(int num)
+LIST_HEAD(pci_mmcfg_list);
+
+static __init void pci_mmconfig_remove(struct pci_mmcfg_region *cfg)
 {
-	struct acpi_mcfg_allocation *new;
-	int new_num = pci_mmcfg_config_num + num;
-
-	new = kzalloc(sizeof(pci_mmcfg_config[0]) * new_num, GFP_KERNEL);
-	if (!new)
-		return -1;
-
-	if (pci_mmcfg_config) {
-		memcpy(new, pci_mmcfg_config,
-			 sizeof(pci_mmcfg_config[0]) * new_num);
-		kfree(pci_mmcfg_config);
-	}
-	pci_mmcfg_config = new;
-
-	return 0;
+	if (cfg->res.parent)
+		release_resource(&cfg->res);
+	list_del(&cfg->list);
+	kfree(cfg);
 }
 
-static __init void fill_one_mmcfg(u64 addr, int segment, int start, int end)
+static __init void free_all_mmcfg(void)
 {
-	int i = pci_mmcfg_config_num;
+	struct pci_mmcfg_region *cfg, *tmp;
 
-	pci_mmcfg_config_num++;
-	pci_mmcfg_config[i].address = addr;
-	pci_mmcfg_config[i].pci_segment = segment;
-	pci_mmcfg_config[i].start_bus_number = start;
-	pci_mmcfg_config[i].end_bus_number = end;
+	pci_mmcfg_arch_free();
+	list_for_each_entry_safe(cfg, tmp, &pci_mmcfg_list, list)
+		pci_mmconfig_remove(cfg);
+}
+
+static __init void list_add_sorted(struct pci_mmcfg_region *new)
+{
+	struct pci_mmcfg_region *cfg;
+
+	/* keep list sorted by segment and starting bus number */
+	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+		if (cfg->segment > new->segment ||
+		    (cfg->segment == new->segment &&
+		     cfg->start_bus >= new->start_bus)) {
+			list_add_tail(&new->list, &cfg->list);
+			return;
+		}
+	}
+	list_add_tail(&new->list, &pci_mmcfg_list);
+}
+
+static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
+							int end, u64 addr)
+{
+	struct pci_mmcfg_region *new;
+	int num_buses;
+	struct resource *res;
+
+	if (addr == 0)
+		return NULL;
+
+	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	if (!new)
+		return NULL;
+
+	new->address = addr;
+	new->segment = segment;
+	new->start_bus = start;
+	new->end_bus = end;
+
+	list_add_sorted(new);
+
+	num_buses = end - start + 1;
+	res = &new->res;
+	res->start = addr + PCI_MMCFG_BUS_OFFSET(start);
+	res->end = addr + PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
+	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+	snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN,
+		 "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end);
+	res->name = new->name;
+
+	printk(KERN_INFO PREFIX "MMCONFIG for domain %04x [bus %02x-%02x] at "
+	       "%pR (base %#lx)\n", segment, start, end, &new->res,
+	       (unsigned long) addr);
+
+	return new;
+}
+
+struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus)
+{
+	struct pci_mmcfg_region *cfg;
+
+	list_for_each_entry(cfg, &pci_mmcfg_list, list)
+		if (cfg->segment == segment &&
+		    cfg->start_bus <= bus && bus <= cfg->end_bus)
+			return cfg;
+
+	return NULL;
 }
 
 static const char __init *pci_mmcfg_e7520(void)
@@ -68,11 +118,9 @@
 	if (win == 0x0000 || win == 0xf000)
 		return NULL;
 
-	if (extend_mmcfg(1) == -1)
+	if (pci_mmconfig_add(0, 0, 255, win << 16) == NULL)
 		return NULL;
 
-	fill_one_mmcfg(win << 16, 0, 0, 255);
-
 	return "Intel Corporation E7520 Memory Controller Hub";
 }
 
@@ -114,11 +162,9 @@
 	if ((pciexbar & mask) >= 0xf0000000U)
 		return NULL;
 
-	if (extend_mmcfg(1) == -1)
+	if (pci_mmconfig_add(0, 0, (len >> 20) - 1, pciexbar & mask) == NULL)
 		return NULL;
 
-	fill_one_mmcfg(pciexbar & mask, 0, 0, (len >> 20) - 1);
-
 	return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub";
 }
 
@@ -127,7 +173,7 @@
 	u32 low, high, address;
 	u64 base, msr;
 	int i;
-	unsigned segnbits = 0, busnbits;
+	unsigned segnbits = 0, busnbits, end_bus;
 
 	if (!(pci_probe & PCI_CHECK_ENABLE_AMD_MMCONF))
 		return NULL;
@@ -161,11 +207,13 @@
 		busnbits = 8;
 	}
 
-	if (extend_mmcfg(1 << segnbits) == -1)
-		return NULL;
-
+	end_bus = (1 << busnbits) - 1;
 	for (i = 0; i < (1 << segnbits); i++)
-		fill_one_mmcfg(base + (1<<28) * i, i, 0, (1 << busnbits) - 1);
+		if (pci_mmconfig_add(i, 0, end_bus,
+				     base + (1<<28) * i) == NULL) {
+			free_all_mmcfg();
+			return NULL;
+		}
 
 	return "AMD Family 10h NB";
 }
@@ -190,7 +238,7 @@
 	/*
 	 * do check if amd fam10h already took over
 	 */
-	if (!acpi_disabled || pci_mmcfg_config_num || mcp55_checked)
+	if (!acpi_disabled || !list_empty(&pci_mmcfg_list) || mcp55_checked)
 		return NULL;
 
 	mcp55_checked = true;
@@ -213,16 +261,14 @@
 		if (!(extcfg & extcfg_enable_mask))
 			continue;
 
-		if (extend_mmcfg(1) == -1)
-			continue;
-
 		size_index = (extcfg & extcfg_size_mask) >> extcfg_size_shift;
 		base = extcfg & extcfg_base_mask[size_index];
 		/* base could > 4G */
 		base <<= extcfg_base_lshift;
 		start = (extcfg & extcfg_start_mask) >> extcfg_start_shift;
 		end = start + extcfg_sizebus[size_index] - 1;
-		fill_one_mmcfg(base, 0, start, end);
+		if (pci_mmconfig_add(0, start, end, base) == NULL)
+			continue;
 		mcp55_mmconf_found++;
 	}
 
@@ -253,45 +299,27 @@
 	  0x0369, pci_mmcfg_nvidia_mcp55 },
 };
 
-static int __init cmp_mmcfg(const void *x1, const void *x2)
-{
-	const typeof(pci_mmcfg_config[0]) *m1 = x1;
-	const typeof(pci_mmcfg_config[0]) *m2 = x2;
-	int start1, start2;
-
-	start1 = m1->start_bus_number;
-	start2 = m2->start_bus_number;
-
-	return start1 - start2;
-}
-
 static void __init pci_mmcfg_check_end_bus_number(void)
 {
-	int i;
-	typeof(pci_mmcfg_config[0]) *cfg, *cfgx;
-
-	/* sort them at first */
-	sort(pci_mmcfg_config, pci_mmcfg_config_num,
-		 sizeof(pci_mmcfg_config[0]), cmp_mmcfg, NULL);
+	struct pci_mmcfg_region *cfg, *cfgx;
 
 	/* last one*/
-	if (pci_mmcfg_config_num > 0) {
-		i = pci_mmcfg_config_num - 1;
-		cfg = &pci_mmcfg_config[i];
-		if (cfg->end_bus_number < cfg->start_bus_number)
-			cfg->end_bus_number = 255;
-	}
+	cfg = list_entry(pci_mmcfg_list.prev, typeof(*cfg), list);
+	if (cfg)
+		if (cfg->end_bus < cfg->start_bus)
+			cfg->end_bus = 255;
+
+	if (list_is_singular(&pci_mmcfg_list))
+		return;
 
 	/* don't overlap please */
-	for (i = 0; i < pci_mmcfg_config_num - 1; i++) {
-		cfg = &pci_mmcfg_config[i];
-		cfgx = &pci_mmcfg_config[i+1];
+	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+		if (cfg->end_bus < cfg->start_bus)
+			cfg->end_bus = 255;
 
-		if (cfg->end_bus_number < cfg->start_bus_number)
-			cfg->end_bus_number = 255;
-
-		if (cfg->end_bus_number >= cfgx->start_bus_number)
-			cfg->end_bus_number = cfgx->start_bus_number - 1;
+		cfgx = list_entry(cfg->list.next, typeof(*cfg), list);
+		if (cfg != cfgx && cfg->end_bus >= cfgx->start_bus)
+			cfg->end_bus = cfgx->start_bus - 1;
 	}
 }
 
@@ -306,8 +334,7 @@
 	if (!raw_pci_ops)
 		return 0;
 
-	pci_mmcfg_config_num = 0;
-	pci_mmcfg_config = NULL;
+	free_all_mmcfg();
 
 	for (i = 0; i < ARRAY_SIZE(pci_mmcfg_probes); i++) {
 		bus =  pci_mmcfg_probes[i].bus;
@@ -322,45 +349,22 @@
 			name = pci_mmcfg_probes[i].probe();
 
 		if (name)
-			printk(KERN_INFO "PCI: Found %s with MMCONFIG support.\n",
+			printk(KERN_INFO PREFIX "%s with MMCONFIG support\n",
 			       name);
 	}
 
 	/* some end_bus_number is crazy, fix it */
 	pci_mmcfg_check_end_bus_number();
 
-	return pci_mmcfg_config_num != 0;
+	return !list_empty(&pci_mmcfg_list);
 }
 
 static void __init pci_mmcfg_insert_resources(void)
 {
-#define PCI_MMCFG_RESOURCE_NAME_LEN 24
-	int i;
-	struct resource *res;
-	char *names;
-	unsigned num_buses;
+	struct pci_mmcfg_region *cfg;
 
-	res = kcalloc(PCI_MMCFG_RESOURCE_NAME_LEN + sizeof(*res),
-			pci_mmcfg_config_num, GFP_KERNEL);
-	if (!res) {
-		printk(KERN_ERR "PCI: Unable to allocate MMCONFIG resources\n");
-		return;
-	}
-
-	names = (void *)&res[pci_mmcfg_config_num];
-	for (i = 0; i < pci_mmcfg_config_num; i++, res++) {
-		struct acpi_mcfg_allocation *cfg = &pci_mmcfg_config[i];
-		num_buses = cfg->end_bus_number - cfg->start_bus_number + 1;
-		res->name = names;
-		snprintf(names, PCI_MMCFG_RESOURCE_NAME_LEN,
-			 "PCI MMCONFIG %u [%02x-%02x]", cfg->pci_segment,
-			 cfg->start_bus_number, cfg->end_bus_number);
-		res->start = cfg->address + (cfg->start_bus_number << 20);
-		res->end = res->start + (num_buses << 20) - 1;
-		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-		insert_resource(&iomem_resource, res);
-		names += PCI_MMCFG_RESOURCE_NAME_LEN;
-	}
+	list_for_each_entry(cfg, &pci_mmcfg_list, list)
+		insert_resource(&iomem_resource, &cfg->res);
 
 	/* Mark that the resources have been inserted. */
 	pci_mmcfg_resources_inserted = 1;
@@ -437,11 +441,12 @@
 typedef int (*check_reserved_t)(u64 start, u64 end, unsigned type);
 
 static int __init is_mmconf_reserved(check_reserved_t is_reserved,
-		u64 addr, u64 size, int i,
-		typeof(pci_mmcfg_config[0]) *cfg, int with_e820)
+				    struct pci_mmcfg_region *cfg, int with_e820)
 {
+	u64 addr = cfg->res.start;
+	u64 size = resource_size(&cfg->res);
 	u64 old_size = size;
-	int valid = 0;
+	int valid = 0, num_buses;
 
 	while (!is_reserved(addr, addr + size, E820_RESERVED)) {
 		size >>= 1;
@@ -450,19 +455,25 @@
 	}
 
 	if (size >= (16UL<<20) || size == old_size) {
-		printk(KERN_NOTICE
-		       "PCI: MCFG area at %Lx reserved in %s\n",
-			addr, with_e820?"E820":"ACPI motherboard resources");
+		printk(KERN_INFO PREFIX "MMCONFIG at %pR reserved in %s\n",
+		       &cfg->res,
+		       with_e820 ? "E820" : "ACPI motherboard resources");
 		valid = 1;
 
 		if (old_size != size) {
-			/* update end_bus_number */
-			cfg->end_bus_number = cfg->start_bus_number + ((size>>20) - 1);
-			printk(KERN_NOTICE "PCI: updated MCFG configuration %d: base %lx "
-			       "segment %hu buses %u - %u\n",
-			       i, (unsigned long)cfg->address, cfg->pci_segment,
-			       (unsigned int)cfg->start_bus_number,
-			       (unsigned int)cfg->end_bus_number);
+			/* update end_bus */
+			cfg->end_bus = cfg->start_bus + ((size>>20) - 1);
+			num_buses = cfg->end_bus - cfg->start_bus + 1;
+			cfg->res.end = cfg->res.start +
+			    PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
+			snprintf(cfg->name, PCI_MMCFG_RESOURCE_NAME_LEN,
+				 "PCI MMCONFIG %04x [bus %02x-%02x]",
+				 cfg->segment, cfg->start_bus, cfg->end_bus);
+			printk(KERN_INFO PREFIX
+			       "MMCONFIG for %04x [bus%02x-%02x] "
+			       "at %pR (base %#lx) (size reduced!)\n",
+			       cfg->segment, cfg->start_bus, cfg->end_bus,
+			       &cfg->res, (unsigned long) cfg->address);
 		}
 	}
 
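PCI_MMCFG_BUS_OFFSET() converts a bus number (or a count of buses) into a byte offset within the MMCONFIG aperture: each bus decodes 256 devfns times 4 KB of extended config space, i.e. 1 MB. A minimal definition consistent with all of its uses in this patch would be:

	/* 1 MB of extended configuration space per bus */
	#define PCI_MMCFG_BUS_OFFSET(bus)	((bus) << 20)
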
@@ -471,45 +482,26 @@
 
 static void __init pci_mmcfg_reject_broken(int early)
 {
-	typeof(pci_mmcfg_config[0]) *cfg;
-	int i;
+	struct pci_mmcfg_region *cfg;
 
-	if ((pci_mmcfg_config_num == 0) ||
-	    (pci_mmcfg_config == NULL) ||
-	    (pci_mmcfg_config[0].address == 0))
-		return;
-
-	for (i = 0; i < pci_mmcfg_config_num; i++) {
+	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
 		int valid = 0;
-		u64 addr, size;
-
-		cfg = &pci_mmcfg_config[i];
-		addr = cfg->start_bus_number;
-		addr <<= 20;
-		addr += cfg->address;
-		size = cfg->end_bus_number + 1 - cfg->start_bus_number;
-		size <<= 20;
-		printk(KERN_NOTICE "PCI: MCFG configuration %d: base %lx "
-		       "segment %hu buses %u - %u\n",
-		       i, (unsigned long)cfg->address, cfg->pci_segment,
-		       (unsigned int)cfg->start_bus_number,
-		       (unsigned int)cfg->end_bus_number);
 
 		if (!early && !acpi_disabled)
-			valid = is_mmconf_reserved(is_acpi_reserved, addr, size, i, cfg, 0);
+			valid = is_mmconf_reserved(is_acpi_reserved, cfg, 0);
 
 		if (valid)
 			continue;
 
 		if (!early)
-			printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %Lx is not"
-			       " reserved in ACPI motherboard resources\n",
-			       cfg->address);
+			printk(KERN_ERR FW_BUG PREFIX
+			       "MMCONFIG at %pR not reserved in "
+			       "ACPI motherboard resources\n", &cfg->res);
 
 		/* Don't try to do this check unless configuration
 		   type 1 is available (what about type 2?). */
 		if (raw_pci_ops)
-			valid = is_mmconf_reserved(e820_all_mapped, addr, size, i, cfg, 1);
+			valid = is_mmconf_reserved(e820_all_mapped, cfg, 1);
 
 		if (!valid)
 			goto reject;
@@ -518,34 +510,41 @@
 	return;
 
 reject:
-	printk(KERN_INFO "PCI: Not using MMCONFIG.\n");
-	pci_mmcfg_arch_free();
-	kfree(pci_mmcfg_config);
-	pci_mmcfg_config = NULL;
-	pci_mmcfg_config_num = 0;
+	printk(KERN_INFO PREFIX "not using MMCONFIG\n");
+	free_all_mmcfg();
 }
 
 static int __initdata known_bridge;
 
-static int acpi_mcfg_64bit_base_addr __initdata = FALSE;
-
-/* The physical address of the MMCONFIG aperture.  Set from ACPI tables. */
-struct acpi_mcfg_allocation *pci_mmcfg_config;
-int pci_mmcfg_config_num;
-
-static int __init acpi_mcfg_oem_check(struct acpi_table_mcfg *mcfg)
+static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
+					struct acpi_mcfg_allocation *cfg)
 {
-	if (!strcmp(mcfg->header.oem_id, "SGI"))
-		acpi_mcfg_64bit_base_addr = TRUE;
+	int year;
 
-	return 0;
+	if (cfg->address < 0xFFFFFFFF)
+		return 0;
+
+	if (!strcmp(mcfg->header.oem_id, "SGI"))
+		return 0;
+
+	if (mcfg->header.revision >= 1) {
+		if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) &&
+		    year >= 2010)
+			return 0;
+	}
+
+	printk(KERN_ERR PREFIX "MCFG region for %04x [bus %02x-%02x] at %#llx "
+	       "is above 4GB, ignored\n", cfg->pci_segment,
+	       cfg->start_bus_number, cfg->end_bus_number, cfg->address);
+	return -EINVAL;
 }
 
 static int __init pci_parse_mcfg(struct acpi_table_header *header)
 {
 	struct acpi_table_mcfg *mcfg;
+	struct acpi_mcfg_allocation *cfg_table, *cfg;
 	unsigned long i;
-	int config_size;
+	int entries;
 
 	if (!header)
 		return -EINVAL;
@@ -553,38 +552,33 @@
 	mcfg = (struct acpi_table_mcfg *)header;
 
 	/* how many config structures do we have */
-	pci_mmcfg_config_num = 0;
+	free_all_mmcfg();
+	entries = 0;
 	i = header->length - sizeof(struct acpi_table_mcfg);
 	while (i >= sizeof(struct acpi_mcfg_allocation)) {
-		++pci_mmcfg_config_num;
+		entries++;
 		i -= sizeof(struct acpi_mcfg_allocation);
 	}
-	if (pci_mmcfg_config_num == 0) {
+	if (entries == 0) {
 		printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
 		return -ENODEV;
 	}
 
-	config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config);
-	pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL);
-	if (!pci_mmcfg_config) {
-		printk(KERN_WARNING PREFIX
-		       "No memory for MCFG config tables\n");
-		return -ENOMEM;
-	}
-
-	memcpy(pci_mmcfg_config, &mcfg[1], config_size);
-
-	acpi_mcfg_oem_check(mcfg);
-
-	for (i = 0; i < pci_mmcfg_config_num; ++i) {
-		if ((pci_mmcfg_config[i].address > 0xFFFFFFFF) &&
-		    !acpi_mcfg_64bit_base_addr) {
-			printk(KERN_ERR PREFIX
-			       "MMCONFIG not in low 4GB of memory\n");
-			kfree(pci_mmcfg_config);
-			pci_mmcfg_config_num = 0;
+	cfg_table = (struct acpi_mcfg_allocation *) &mcfg[1];
+	for (i = 0; i < entries; i++) {
+		cfg = &cfg_table[i];
+		if (acpi_mcfg_check_entry(mcfg, cfg)) {
+			free_all_mmcfg();
 			return -ENODEV;
 		}
+
+		if (pci_mmconfig_add(cfg->pci_segment, cfg->start_bus_number,
+				   cfg->end_bus_number, cfg->address) == NULL) {
+			printk(KERN_WARNING PREFIX
+			       "no memory for MCFG entries\n");
+			free_all_mmcfg();
+			return -ENOMEM;
+		}
 	}
 
 	return 0;
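
The entry count above is computed by repeated subtraction; because the allocation records are fixed-size and packed immediately after the MCFG header, plain division gives the same answer (sketch):

	entries = (header->length - sizeof(struct acpi_table_mcfg)) /
		  sizeof(struct acpi_mcfg_allocation);
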
@@ -614,9 +608,7 @@
 
 	pci_mmcfg_reject_broken(early);
 
-	if ((pci_mmcfg_config_num == 0) ||
-	    (pci_mmcfg_config == NULL) ||
-	    (pci_mmcfg_config[0].address == 0))
+	if (list_empty(&pci_mmcfg_list))
 		return;
 
 	if (pci_mmcfg_arch_init())
@@ -648,9 +640,7 @@
 	 */
 	if ((pci_mmcfg_resources_inserted == 1) ||
 	    (pci_probe & PCI_PROBE_MMCONF) == 0 ||
-	    (pci_mmcfg_config_num == 0) ||
-	    (pci_mmcfg_config == NULL) ||
-	    (pci_mmcfg_config[0].address == 0))
+	    list_empty(&pci_mmcfg_list))
 		return 1;
 
 	/*
diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
index f10a7e9..90d5fd4 100644
--- a/arch/x86/pci/mmconfig_32.c
+++ b/arch/x86/pci/mmconfig_32.c
@@ -27,18 +27,10 @@
  */
 static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
 {
-	struct acpi_mcfg_allocation *cfg;
-	int cfg_num;
+	struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus);
 
-	for (cfg_num = 0; cfg_num < pci_mmcfg_config_num; cfg_num++) {
-		cfg = &pci_mmcfg_config[cfg_num];
-		if (cfg->pci_segment == seg &&
-		    (cfg->start_bus_number <= bus) &&
-		    (cfg->end_bus_number >= bus))
-			return cfg->address;
-	}
-
-	/* Fall back to type 0 */
+	if (cfg)
+		return cfg->address;
 	return 0;
 }
 
@@ -47,7 +39,7 @@
  */
 static void pci_exp_set_dev_base(unsigned int base, int bus, int devfn)
 {
-	u32 dev_base = base | (bus << 20) | (devfn << 12);
+	u32 dev_base = base | PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12);
 	int cpu = smp_processor_id();
 	if (dev_base != mmcfg_last_accessed_device ||
 	    cpu != mmcfg_last_accessed_cpu) {
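
The 915's window covers one device at a time, but the offset arithmetic is the same fixed-field layout used throughout MMCONFIG. A sketch of the full address decomposition (reg is not part of dev_base above; it is added when the window is actually read):

	/* byte offset of (bus, devfn, reg) within an MMCONFIG aperture */
	u32 off = PCI_MMCFG_BUS_OFFSET(bus)	/* bus << 20: 1 MB per bus */
		| (devfn << 12)			/* 4 KB of registers per devfn */
		| (reg & 0xfff);		/* register within the function */
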
diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
index 94349f8..e783841 100644
--- a/arch/x86/pci/mmconfig_64.c
+++ b/arch/x86/pci/mmconfig_64.c
@@ -12,38 +12,15 @@
 #include <asm/e820.h>
 #include <asm/pci_x86.h>
 
-/* Static virtual mapping of the MMCONFIG aperture */
-struct mmcfg_virt {
-	struct acpi_mcfg_allocation *cfg;
-	char __iomem *virt;
-};
-static struct mmcfg_virt *pci_mmcfg_virt;
-
-static char __iomem *get_virt(unsigned int seg, unsigned bus)
-{
-	struct acpi_mcfg_allocation *cfg;
-	int cfg_num;
-
-	for (cfg_num = 0; cfg_num < pci_mmcfg_config_num; cfg_num++) {
-		cfg = pci_mmcfg_virt[cfg_num].cfg;
-		if (cfg->pci_segment == seg &&
-		    (cfg->start_bus_number <= bus) &&
-		    (cfg->end_bus_number >= bus))
-			return pci_mmcfg_virt[cfg_num].virt;
-	}
-
-	/* Fall back to type 0 */
-	return NULL;
-}
+#define PREFIX "PCI: "
 
 static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn)
 {
-	char __iomem *addr;
+	struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus);
 
-	addr = get_virt(seg, bus);
-	if (!addr)
-		return NULL;
- 	return addr + ((bus << 20) | (devfn << 12));
+	if (cfg && cfg->virt)
+		return cfg->virt + (PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12));
+	return NULL;
 }
 
 static int pci_mmcfg_read(unsigned int seg, unsigned int bus,
@@ -109,42 +86,30 @@
 	.write =	pci_mmcfg_write,
 };
 
-static void __iomem * __init mcfg_ioremap(struct acpi_mcfg_allocation *cfg)
+static void __iomem * __init mcfg_ioremap(struct pci_mmcfg_region *cfg)
 {
 	void __iomem *addr;
 	u64 start, size;
+	int num_buses;
 
-	start = cfg->start_bus_number;
-	start <<= 20;
-	start += cfg->address;
-	size = cfg->end_bus_number + 1 - cfg->start_bus_number;
-	size <<= 20;
+	start = cfg->address + PCI_MMCFG_BUS_OFFSET(cfg->start_bus);
+	num_buses = cfg->end_bus - cfg->start_bus + 1;
+	size = PCI_MMCFG_BUS_OFFSET(num_buses);
 	addr = ioremap_nocache(start, size);
-	if (addr) {
-		printk(KERN_INFO "PCI: Using MMCONFIG at %Lx - %Lx\n",
-		       start, start + size - 1);
-		addr -= cfg->start_bus_number << 20;
-	}
+	if (addr)
+		addr -= PCI_MMCFG_BUS_OFFSET(cfg->start_bus);
 	return addr;
 }
 
 int __init pci_mmcfg_arch_init(void)
 {
-	int i;
-	pci_mmcfg_virt = kzalloc(sizeof(*pci_mmcfg_virt) *
-				 pci_mmcfg_config_num, GFP_KERNEL);
-	if (pci_mmcfg_virt == NULL) {
-		printk(KERN_ERR "PCI: Can not allocate memory for mmconfig structures\n");
-		return 0;
-	}
+	struct pci_mmcfg_region *cfg;
 
-	for (i = 0; i < pci_mmcfg_config_num; ++i) {
-		pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i];
-		pci_mmcfg_virt[i].virt = mcfg_ioremap(&pci_mmcfg_config[i]);
-		if (!pci_mmcfg_virt[i].virt) {
-			printk(KERN_ERR "PCI: Cannot map mmconfig aperture for "
-					"segment %d\n",
-				pci_mmcfg_config[i].pci_segment);
+	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+		cfg->virt = mcfg_ioremap(cfg);
+		if (!cfg->virt) {
+			printk(KERN_ERR PREFIX "can't map MMCONFIG at %pR\n",
+			       &cfg->res);
 			pci_mmcfg_arch_free();
 			return 0;
 		}
@@ -155,19 +120,12 @@
 
 void __init pci_mmcfg_arch_free(void)
 {
-	int i;
+	struct pci_mmcfg_region *cfg;
 
-	if (pci_mmcfg_virt == NULL)
-		return;
-
-	for (i = 0; i < pci_mmcfg_config_num; ++i) {
-		if (pci_mmcfg_virt[i].virt) {
-			iounmap(pci_mmcfg_virt[i].virt + (pci_mmcfg_virt[i].cfg->start_bus_number << 20));
-			pci_mmcfg_virt[i].virt = NULL;
-			pci_mmcfg_virt[i].cfg = NULL;
+	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+		if (cfg->virt) {
+			iounmap(cfg->virt + PCI_MMCFG_BUS_OFFSET(cfg->start_bus));
+			cfg->virt = NULL;
 		}
 	}
-
-	kfree(pci_mmcfg_virt);
-	pci_mmcfg_virt = NULL;
 }
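
Both backends now defer the segment/bus match to pci_mmconfig_lookup() in the shared code. A sketch consistent with its callers here:

	struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus)
	{
		struct pci_mmcfg_region *cfg;

		list_for_each_entry(cfg, &pci_mmcfg_list, list)
			if (cfg->segment == segment &&
			    cfg->start_bus <= bus && bus <= cfg->end_bus)
				return cfg;

		return NULL;
	}
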
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index b8e45f1..2b26dd5 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -27,7 +27,9 @@
 #include <linux/page-flags.h>
 #include <linux/highmem.h>
 #include <linux/console.h>
+#include <linux/pci.h>
 
+#include <xen/xen.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/version.h>
 #include <xen/interface/physdev.h>
@@ -1175,7 +1177,11 @@
 		add_preferred_console("xenboot", 0, NULL);
 		add_preferred_console("tty", 0, NULL);
 		add_preferred_console("hvc", 0, NULL);
+	} else {
+		/* Make sure ACS will be enabled */
+		pci_request_acs();
 	}
 
 	xen_raw_console_write("about to get started...\n");
 
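pci_request_acs() only latches a flag that the PCI core consults later when initializing devices, so calling it this early from dom0 setup is safe. Its body is essentially (sketch):

	void pci_request_acs(void)
	{
		pci_acs_enable = 1;	/* checked when devices are set up */
	}
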
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h
index 05cebf8..4352dbe 100644
--- a/arch/xtensa/include/asm/syscall.h
+++ b/arch/xtensa/include/asm/syscall.h
@@ -13,8 +13,6 @@
 asmlinkage long xtensa_execve(char*, char**, char**, struct pt_regs*);
 asmlinkage long xtensa_clone(unsigned long, unsigned long, struct pt_regs*);
 asmlinkage long xtensa_pipe(int __user *);
-asmlinkage long xtensa_mmap2(unsigned long, unsigned long, unsigned long,
-    			     unsigned long, unsigned long, unsigned long);
 asmlinkage long xtensa_ptrace(long, long, long, long);
 asmlinkage long xtensa_sigreturn(struct pt_regs*);
 asmlinkage long xtensa_rt_sigreturn(struct pt_regs*);
diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h
index 4e55dc7..fbf318b 100644
--- a/arch/xtensa/include/asm/unistd.h
+++ b/arch/xtensa/include/asm/unistd.h
@@ -189,7 +189,7 @@
 /* File Map / Shared Memory Operations */
 
 #define __NR_mmap2 				 80
-__SYSCALL( 80, xtensa_mmap2, 6)
+__SYSCALL( 80, sys_mmap_pgoff, 6)
 #define __NR_munmap 				 81
 __SYSCALL( 81, sys_munmap, 2)
 #define __NR_mprotect 				 82
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index ac15ecb..1e67bab 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -57,31 +57,6 @@
 	return error;
 }
 
-
-asmlinkage long xtensa_mmap2(unsigned long addr, unsigned long len,
-   			     unsigned long prot, unsigned long flags,
-			     unsigned long fd, unsigned long pgoff)
-{
-	int error = -EBADF;
-	struct file * file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
-out:
-	return error;
-}
-
 asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
 {
 	unsigned long ret;
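
xtensa_mmap2() can go because the generic sys_mmap_pgoff() now does the identical fget()/do_mmap_pgoff() dance for every architecture; the syscall-table change above simply points slot 80 at it. Simplified sketch of the generic version, mirroring the code deleted here (error handling trimmed):

	SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
			unsigned long, prot, unsigned long, flags,
			unsigned long, fd, unsigned long, pgoff)
	{
		struct file *file = NULL;
		unsigned long retval = -EBADF;

		flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
		if (!(flags & MAP_ANONYMOUS)) {
			file = fget(fd);
			if (!file)
				goto out;
		}

		down_write(&current->mm->mmap_sem);
		retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
		up_write(&current->mm->mmap_sem);

		if (file)
			fput(file);
	out:
		return retval;
	}
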
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 7702118..c7b10b4 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -19,6 +19,7 @@
 
 # All the builtin files are in the "acpi." module_param namespace.
 acpi-y				+= osl.o utils.o reboot.o
+acpi-y				+= hest.o
 
 # sleep related files
 acpi-y				+= wakeup.o
diff --git a/drivers/acpi/hest.c b/drivers/acpi/hest.c
new file mode 100644
index 0000000..4bb18c9
--- /dev/null
+++ b/drivers/acpi/hest.c
@@ -0,0 +1,135 @@
+#include <linux/acpi.h>
+#include <linux/pci.h>
+
+#define PREFIX "ACPI: "
+
+static inline unsigned long parse_acpi_hest_ia_machine_check(struct acpi_hest_ia_machine_check *p)
+{
+	return sizeof(*p) +
+		(sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks);
+}
+
+static inline unsigned long parse_acpi_hest_ia_corrected(struct acpi_hest_ia_corrected *p)
+{
+	return sizeof(*p) +
+		(sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks);
+}
+
+static inline unsigned long parse_acpi_hest_ia_nmi(struct acpi_hest_ia_nmi *p)
+{
+	return sizeof(*p);
+}
+
+static inline unsigned long parse_acpi_hest_generic(struct acpi_hest_generic *p)
+{
+	return sizeof(*p);
+}
+
+static inline unsigned int hest_match_pci(struct acpi_hest_aer_common *p, struct pci_dev *pci)
+{
+	return	(0           == pci_domain_nr(pci->bus) &&
+		 p->bus      == pci->bus->number &&
+		 p->device   == PCI_SLOT(pci->devfn) &&
+		 p->function == PCI_FUNC(pci->devfn));
+}
+
+static unsigned long parse_acpi_hest_aer(void *hdr, int type, struct pci_dev *pci, int *firmware_first)
+{
+	struct acpi_hest_aer_common *p = hdr + sizeof(struct acpi_hest_header);
+	unsigned long rc = 0;
+	u8 pcie_type = 0;
+	u8 bridge = 0;
+	switch (type) {
+	case ACPI_HEST_TYPE_AER_ROOT_PORT:
+		rc = sizeof(struct acpi_hest_aer_root);
+		pcie_type = PCI_EXP_TYPE_ROOT_PORT;
+		break;
+	case ACPI_HEST_TYPE_AER_ENDPOINT:
+		rc = sizeof(struct acpi_hest_aer);
+		pcie_type = PCI_EXP_TYPE_ENDPOINT;
+		break;
+	case ACPI_HEST_TYPE_AER_BRIDGE:
+		rc = sizeof(struct acpi_hest_aer_bridge);
+		if ((pci->class >> 16) == PCI_BASE_CLASS_BRIDGE)
+			bridge = 1;
+		break;
+	}
+
+	if (p->flags & ACPI_HEST_GLOBAL) {
+		if ((pci->is_pcie && (pci->pcie_type == pcie_type)) || bridge)
+			*firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
+	} else if (hest_match_pci(p, pci)) {
+		*firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
+	}
+	return rc;
+}
+
+static int acpi_hest_firmware_first(struct acpi_table_header *stdheader, struct pci_dev *pci)
+{
+	struct acpi_table_hest *hest = (struct acpi_table_hest *)stdheader;
+	void *p = (void *)hest + sizeof(*hest); /* defined by the ACPI 4.0 spec */
+	struct acpi_hest_header *hdr = p;
+
+	int i;
+	int firmware_first = 0;
+	static unsigned char printed_unused = 0;
+	static unsigned char printed_reserved = 0;
+
+	for (i = 0; p < (((void *)hest) + hest->header.length) && i < hest->error_source_count; i++) {
+		hdr = p;	/* keep hdr tracking p as it advances past each entry */
+		switch (hdr->type) {
+		case ACPI_HEST_TYPE_IA32_CHECK:
+			p += parse_acpi_hest_ia_machine_check(p);
+			break;
+		case ACPI_HEST_TYPE_IA32_CORRECTED_CHECK:
+			p += parse_acpi_hest_ia_corrected(p);
+			break;
+		case ACPI_HEST_TYPE_IA32_NMI:
+			p += parse_acpi_hest_ia_nmi(p);
+			break;
+		/* These three should never appear */
+		case ACPI_HEST_TYPE_NOT_USED3:
+		case ACPI_HEST_TYPE_NOT_USED4:
+		case ACPI_HEST_TYPE_NOT_USED5:
+			if (!printed_unused) {
+				printk(KERN_DEBUG PREFIX
+				       "HEST Error Source list contains an obsolete type (%d).\n", hdr->type);
+				printed_unused = 1;
+			}
+			break;
+		case ACPI_HEST_TYPE_AER_ROOT_PORT:
+		case ACPI_HEST_TYPE_AER_ENDPOINT:
+		case ACPI_HEST_TYPE_AER_BRIDGE:
+			p += parse_acpi_hest_aer(p, hdr->type, pci, &firmware_first);
+			break;
+		case ACPI_HEST_TYPE_GENERIC_ERROR:
+			p += parse_acpi_hest_generic(p);
+			break;
+		/* These should never appear either */
+		case ACPI_HEST_TYPE_RESERVED:
+		default:
+			if (!printed_reserved) {
+				printk(KERN_DEBUG PREFIX
+				       "HEST Error Source list contains a reserved type (%d).\n", hdr->type);
+				printed_reserved = 1;
+			}
+			break;
+		}
+	}
+	return firmware_first;
+}
+
+int acpi_hest_firmware_first_pci(struct pci_dev *pci)
+{
+	acpi_status status;
+	struct acpi_table_header *hest = NULL;
+
+	status = acpi_get_table(ACPI_SIG_HEST, 1, &hest);
+	if (ACPI_SUCCESS(status) && acpi_hest_firmware_first(hest, pci))
+		return 1;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_hest_firmware_first_pci);
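
A typical consumer is the PCIe AER service deciding whether firmware owns a device's error registers. Illustrative only (the surrounding code is hypothetical, not from this series):

	/* back off from native AER handling if firmware claimed the device */
	if (acpi_hest_firmware_first_pci(pdev))
		return -EIO;
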
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index b8578bb..05a31e5 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -42,6 +42,7 @@
 #include <linux/module.h>
 #include <linux/scatterlist.h>
 
+#include <xen/xen.h>
 #include <xen/xenbus.h>
 #include <xen/grant_table.h>
 #include <xen/events.h>
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 44bc8bb..4d29059 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -1066,7 +1066,7 @@
 		return 0;
 
 	spin_lock_irq(&data->txlock);
-	if (!(interface_to_usbdev(intf)->auto_pm && data->tx_in_flight)) {
+	if (!((message.event & PM_EVENT_AUTO) && data->tx_in_flight)) {
 		set_bit(BTUSB_SUSPENDING, &data->flags);
 		spin_unlock_irq(&data->txlock);
 	} else {
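
Checking message.event instead of the device's auto_pm flag asks the question directly: is this an automatic (runtime) suspend? A minimal sketch of the pattern, where btusb_data and tx_in_flight are the driver's own state as used above (function name illustrative):

	static int example_suspend(struct usb_interface *intf, pm_message_t message)
	{
		struct btusb_data *data = usb_get_intfdata(intf);

		/* veto only autosuspend while TX is in flight */
		if ((message.event & PM_EVENT_AUTO) && data->tx_in_flight)
			return -EBUSY;	/* retry autosuspend later */

		return 0;		/* system sleep always proceeds */
	}
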
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 3cb56a0..30c36ac 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -36,10 +36,10 @@
 #define PCI_DEVICE_ID_INTEL_82965GME_IG     0x2A12
 #define PCI_DEVICE_ID_INTEL_82945GME_HB     0x27AC
 #define PCI_DEVICE_ID_INTEL_82945GME_IG     0x27AE
-#define PCI_DEVICE_ID_INTEL_IGDGM_HB        0xA010
-#define PCI_DEVICE_ID_INTEL_IGDGM_IG        0xA011
-#define PCI_DEVICE_ID_INTEL_IGDG_HB         0xA000
-#define PCI_DEVICE_ID_INTEL_IGDG_IG         0xA001
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB        0xA010
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG        0xA011
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB         0xA000
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG         0xA001
 #define PCI_DEVICE_ID_INTEL_G33_HB          0x29C0
 #define PCI_DEVICE_ID_INTEL_G33_IG          0x29C2
 #define PCI_DEVICE_ID_INTEL_Q35_HB          0x29B0
@@ -50,20 +50,20 @@
 #define PCI_DEVICE_ID_INTEL_B43_IG          0x2E42
 #define PCI_DEVICE_ID_INTEL_GM45_HB         0x2A40
 #define PCI_DEVICE_ID_INTEL_GM45_IG         0x2A42
-#define PCI_DEVICE_ID_INTEL_IGD_E_HB        0x2E00
-#define PCI_DEVICE_ID_INTEL_IGD_E_IG        0x2E02
+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB        0x2E00
+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG        0x2E02
 #define PCI_DEVICE_ID_INTEL_Q45_HB          0x2E10
 #define PCI_DEVICE_ID_INTEL_Q45_IG          0x2E12
 #define PCI_DEVICE_ID_INTEL_G45_HB          0x2E20
 #define PCI_DEVICE_ID_INTEL_G45_IG          0x2E22
 #define PCI_DEVICE_ID_INTEL_G41_HB          0x2E30
 #define PCI_DEVICE_ID_INTEL_G41_IG          0x2E32
-#define PCI_DEVICE_ID_INTEL_IGDNG_D_HB	    0x0040
-#define PCI_DEVICE_ID_INTEL_IGDNG_D_IG	    0x0042
-#define PCI_DEVICE_ID_INTEL_IGDNG_M_HB	    0x0044
-#define PCI_DEVICE_ID_INTEL_IGDNG_MA_HB	    0x0062
-#define PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB    0x006a
-#define PCI_DEVICE_ID_INTEL_IGDNG_M_IG	    0x0046
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB	    0x0040
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG	    0x0042
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB	    0x0044
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB	    0x0062
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB    0x006a
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG	    0x0046
 
 /* cover 915 and 945 variants */
 #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
@@ -83,22 +83,22 @@
 #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
 
-#define IS_IGD (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
+#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
 
-#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \
+#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB)
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB)
 
 extern int agp_memory_reserved;
 
@@ -178,6 +178,7 @@
 	 * popup and for the GTT.
 	 */
 	int gtt_entries;			/* i830+ */
+	int gtt_total_size;
 	union {
 		void __iomem *i9xx_flush_page;
 		void *i8xx_flush_page;
@@ -653,7 +654,7 @@
 			size = 512;
 		}
 		size += 4; /* add in BIOS popup space */
-	} else if (IS_G33 && !IS_IGD) {
+	} else if (IS_G33 && !IS_PINEVIEW) {
 	/* G33's GTT size defined in gmch_ctrl */
 		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
 		case G33_PGETBL_SIZE_1M:
@@ -669,7 +670,7 @@
 			size = 512;
 		}
 		size += 4;
-	} else if (IS_G4X || IS_IGD) {
+	} else if (IS_G4X || IS_PINEVIEW) {
 		/* On 4 series hardware, GTT stolen is separate from graphics
 		 * stolen, ignore it in stolen gtt entries counting.  However,
 		 * 4KB of the stolen memory doesn't get mapped to the GTT.
@@ -1153,7 +1154,7 @@
 	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
 
 	if (agp_bridge->driver->needs_scratch_page) {
-		for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
+		for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
 			writel(agp_bridge->scratch_page, intel_private.gtt+i);
 		}
 		readl(intel_private.gtt+i-1);	/* PCI Posting. */
@@ -1308,6 +1309,8 @@
 	if (!intel_private.gtt)
 		return -ENOMEM;
 
+	intel_private.gtt_total_size = gtt_map_size / 4;
+
 	temp &= 0xfff80000;
 
 	intel_private.registers = ioremap(temp, 128 * 4096);
@@ -1352,15 +1355,15 @@
 {
 	switch (agp_bridge->dev->device) {
 	case PCI_DEVICE_ID_INTEL_GM45_HB:
-	case PCI_DEVICE_ID_INTEL_IGD_E_HB:
+	case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
 	case PCI_DEVICE_ID_INTEL_Q45_HB:
 	case PCI_DEVICE_ID_INTEL_G45_HB:
 	case PCI_DEVICE_ID_INTEL_G41_HB:
 	case PCI_DEVICE_ID_INTEL_B43_HB:
-	case PCI_DEVICE_ID_INTEL_IGDNG_D_HB:
-	case PCI_DEVICE_ID_INTEL_IGDNG_M_HB:
-	case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB:
-	case PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB:
+	case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
+	case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
+	case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
+	case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
 		*gtt_offset = *gtt_size = MB(2);
 		break;
 	default:
@@ -1395,6 +1398,8 @@
 	if (!intel_private.gtt)
 		return -ENOMEM;
 
+	intel_private.gtt_total_size = gtt_size / 4;
+
 	intel_private.registers = ioremap(temp, 128 * 4096);
 	if (!intel_private.registers) {
 		iounmap(intel_private.gtt);
@@ -2340,14 +2345,14 @@
 		NULL, &intel_g33_driver },
 	{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
 		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_IGDGM_HB, PCI_DEVICE_ID_INTEL_IGDGM_IG, 0, "IGD",
+	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "Pineview",
 		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_IGDG_HB, PCI_DEVICE_ID_INTEL_IGDG_IG, 0, "IGD",
+	{ PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "Pineview",
 		NULL, &intel_g33_driver },
 	{ PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
-	    "Mobile Intel® GM45 Express", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0,
-	    "Intel Integrated Graphics Device", NULL, &intel_i965_driver },
+	    "GM45", NULL, &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, 0,
+	    "Eaglelake", NULL, &intel_i965_driver },
 	{ PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0,
 	    "Q45/Q43", NULL, &intel_i965_driver },
 	{ PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0,
@@ -2356,14 +2361,14 @@
 	    "B43", NULL, &intel_i965_driver },
 	{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
 	    "G41", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IGDNG_D_HB, PCI_DEVICE_ID_INTEL_IGDNG_D_IG, 0,
-	    "IGDNG/D", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IGDNG_M_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
-	    "IGDNG/M", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IGDNG_MA_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
-	    "IGDNG/MA", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
-	    "IGDNG/MC2", NULL, &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0,
+	    "Ironlake/D", NULL, &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+	    "Ironlake/M", NULL, &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+	    "Ironlake/MA", NULL, &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+	    "Ironlake/MC2", NULL, &intel_i965_driver },
 	{ 0, 0, 0, NULL, NULL, NULL }
 };
 
@@ -2545,8 +2550,8 @@
 	ID(PCI_DEVICE_ID_INTEL_82945G_HB),
 	ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
 	ID(PCI_DEVICE_ID_INTEL_82945GME_HB),
-	ID(PCI_DEVICE_ID_INTEL_IGDGM_HB),
-	ID(PCI_DEVICE_ID_INTEL_IGDG_HB),
+	ID(PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB),
+	ID(PCI_DEVICE_ID_INTEL_PINEVIEW_HB),
 	ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
 	ID(PCI_DEVICE_ID_INTEL_82G35_HB),
 	ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
@@ -2557,15 +2562,15 @@
 	ID(PCI_DEVICE_ID_INTEL_Q35_HB),
 	ID(PCI_DEVICE_ID_INTEL_Q33_HB),
 	ID(PCI_DEVICE_ID_INTEL_GM45_HB),
-	ID(PCI_DEVICE_ID_INTEL_IGD_E_HB),
+	ID(PCI_DEVICE_ID_INTEL_EAGLELAKE_HB),
 	ID(PCI_DEVICE_ID_INTEL_Q45_HB),
 	ID(PCI_DEVICE_ID_INTEL_G45_HB),
 	ID(PCI_DEVICE_ID_INTEL_G41_HB),
 	ID(PCI_DEVICE_ID_INTEL_B43_HB),
-	ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB),
-	ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB),
-	ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB),
-	ID(PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB),
+	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
+	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
+	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
+	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
 	{ }
 };
 
diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
index a6ee32b..b1a7163 100644
--- a/drivers/char/hvc_xen.c
+++ b/drivers/char/hvc_xen.c
@@ -25,6 +25,8 @@
 #include <linux/types.h>
 
 #include <asm/xen/hypervisor.h>
+
+#include <xen/xen.h>
 #include <xen/page.h>
 #include <xen/events.h>
 #include <xen/interface/io/console.h>
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index a38831c..5fdd6da 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -19,26 +19,48 @@
 static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES];
 
 /*
- * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only
- * for DDR2 DRAM mapping.
+ * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
+ * later.
  */
-u32 revf_quad_ddr2_shift[] = {
-	0,	/* 0000b NULL DIMM (128mb) */
-	28,	/* 0001b 256mb */
-	29,	/* 0010b 512mb */
-	29,	/* 0011b 512mb */
-	29,	/* 0100b 512mb */
-	30,	/* 0101b 1gb */
-	30,	/* 0110b 1gb */
-	31,	/* 0111b 2gb */
-	31,	/* 1000b 2gb */
-	32,	/* 1001b 4gb */
-	32,	/* 1010b 4gb */
-	33,	/* 1011b 8gb */
-	0,	/* 1100b future */
-	0,	/* 1101b future */
-	0,	/* 1110b future */
-	0	/* 1111b future */
+static int ddr2_dbam_revCG[] = {
+			   [0]		= 32,
+			   [1]		= 64,
+			   [2]		= 128,
+			   [3]		= 256,
+			   [4]		= 512,
+			   [5]		= 1024,
+			   [6]		= 2048,
+};
+
+static int ddr2_dbam_revD[] = {
+			   [0]		= 32,
+			   [1]		= 64,
+			   [2 ... 3]	= 128,
+			   [4]		= 256,
+			   [5]		= 512,
+			   [6]		= 256,
+			   [7]		= 512,
+			   [8 ... 9]	= 1024,
+			   [10]		= 2048,
+};
+
+static int ddr2_dbam[] = { [0]		= 128,
+			   [1]		= 256,
+			   [2 ... 4]	= 512,
+			   [5 ... 6]	= 1024,
+			   [7 ... 8]	= 2048,
+			   [9 ... 10]	= 4096,
+			   [11]		= 8192,
+};
+
+static int ddr3_dbam[] = { [0]		= -1,
+			   [1]		= 256,
+			   [2]		= 512,
+			   [3 ... 4]	= -1,
+			   [5 ... 6]	= 1024,
+			   [7 ... 8]	= 2048,
+			   [9 ... 10]	= 4096,
+			   [11]		= 8192,
 };
 
 /*
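
Each table is indexed by the DBAM chip-select mode nibble and yields the chip-select size in MB directly, which retires the old shift-then-subtract arithmetic. For example, with the values above:

	/* rev F and later, DDR2: cs_mode 5 decodes to a 1 GB chip select */
	int size_mb = ddr2_dbam[5];	/* == 1024 */
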
@@ -164,11 +186,9 @@
 {
 	struct amd64_pvt *pvt = mci->pvt_info;
 	u32 scrubval = 0;
-	int status = -1, i, ret = 0;
+	int status = -1, i;
 
-	ret = pci_read_config_dword(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
-	if (ret)
-		debugf0("Reading K8_SCRCTRL failed\n");
+	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
 
 	scrubval = scrubval & 0x001F;
 
@@ -189,7 +209,7 @@
 /* Map from a CSROW entry to the mask entry that operates on it */
 static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
 {
-	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F)
+	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F)
 		return csrow;
 	else
 		return csrow >> 1;
@@ -437,7 +457,7 @@
 	u64 base;
 
 	/* only revE and later have the DRAM Hole Address Register */
-	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_E) {
+	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
 		debugf1("  revision %d for node %d does not support DHAR\n",
 			pvt->ext_model, pvt->mc_node_id);
 		return 1;
@@ -743,21 +763,6 @@
 	*input_addr_max = base | mask | pvt->dcs_mask_notused;
 }
 
-/*
- * Extract error address from MCA NB Address Low (section 3.6.4.5) and MCA NB
- * Address High (section 3.6.4.6) register values and return the result. Address
- * is located in the info structure (nbeah and nbeal), the encoding is device
- * specific.
- */
-static u64 extract_error_address(struct mem_ctl_info *mci,
-				 struct err_regs *info)
-{
-	struct amd64_pvt *pvt = mci->pvt_info;
-
-	return pvt->ops->get_error_address(mci, info);
-}
-
-
 /* Map the Error address to a PAGE and PAGE OFFSET. */
 static inline void error_address_to_page_and_offset(u64 error_address,
 						    u32 *page, u32 *offset)
@@ -787,7 +792,7 @@
 	return csrow;
 }
 
-static int get_channel_from_ecc_syndrome(unsigned short syndrome);
+static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
 
 static void amd64_cpu_display_info(struct amd64_pvt *pvt)
 {
@@ -797,7 +802,7 @@
 		edac_printk(KERN_DEBUG, EDAC_MC, "F10h CPU detected\n");
 	else if (boot_cpu_data.x86 == 0xf)
 		edac_printk(KERN_DEBUG, EDAC_MC, "%s detected\n",
-			(pvt->ext_model >= OPTERON_CPU_REV_F) ?
+			(pvt->ext_model >= K8_REV_F) ?
 			"Rev F or later" : "Rev E or earlier");
 	else
 		/* we'll hardly ever ever get here */
@@ -813,7 +818,7 @@
 	int bit;
 	enum dev_type edac_cap = EDAC_FLAG_NONE;
 
-	bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= OPTERON_CPU_REV_F)
+	bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
 		? 19
 		: 17;
 
@@ -824,75 +829,66 @@
 }
 
 
-static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
-					 int ganged);
+static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt);
+
+static void amd64_dump_dramcfg_low(u32 dclr, int chan)
+{
+	debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
+
+	debugf1("  DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
+		(dclr & BIT(16)) ?  "un" : "",
+		(dclr & BIT(19)) ? "yes" : "no");
+
+	debugf1("  PAR/ERR parity: %s\n",
+		(dclr & BIT(8)) ?  "enabled" : "disabled");
+
+	debugf1("  DCT 128bit mode width: %s\n",
+		(dclr & BIT(11)) ?  "128b" : "64b");
+
+	debugf1("  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
+		(dclr & BIT(12)) ?  "yes" : "no",
+		(dclr & BIT(13)) ?  "yes" : "no",
+		(dclr & BIT(14)) ?  "yes" : "no",
+		(dclr & BIT(15)) ?  "yes" : "no");
+}
 
 /* Display and decode various NB registers for debug purposes. */
 static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
 {
 	int ganged;
 
-	debugf1("  nbcap:0x%8.08x DctDualCap=%s DualNode=%s 8-Node=%s\n",
-		pvt->nbcap,
-		(pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "True" : "False",
-		(pvt->nbcap & K8_NBCAP_DUAL_NODE) ? "True" : "False",
-		(pvt->nbcap & K8_NBCAP_8_NODE) ? "True" : "False");
-	debugf1("    ECC Capable=%s   ChipKill Capable=%s\n",
-		(pvt->nbcap & K8_NBCAP_SECDED) ? "True" : "False",
-		(pvt->nbcap & K8_NBCAP_CHIPKILL) ? "True" : "False");
-	debugf1("  DramCfg0-low=0x%08x DIMM-ECC=%s Parity=%s Width=%s\n",
-		pvt->dclr0,
-		(pvt->dclr0 & BIT(19)) ?  "Enabled" : "Disabled",
-		(pvt->dclr0 & BIT(8)) ?  "Enabled" : "Disabled",
-		(pvt->dclr0 & BIT(11)) ?  "128b" : "64b");
-	debugf1("    DIMM x4 Present: L0=%s L1=%s L2=%s L3=%s  DIMM Type=%s\n",
-		(pvt->dclr0 & BIT(12)) ?  "Y" : "N",
-		(pvt->dclr0 & BIT(13)) ?  "Y" : "N",
-		(pvt->dclr0 & BIT(14)) ?  "Y" : "N",
-		(pvt->dclr0 & BIT(15)) ?  "Y" : "N",
-		(pvt->dclr0 & BIT(16)) ?  "UN-Buffered" : "Buffered");
+	debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
 
+	debugf1("  NB two channel DRAM capable: %s\n",
+		(pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no");
 
-	debugf1("  online-spare: 0x%8.08x\n", pvt->online_spare);
+	debugf1("  ECC capable: %s, ChipKill ECC capable: %s\n",
+		(pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no",
+		(pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no");
 
+	amd64_dump_dramcfg_low(pvt->dclr0, 0);
+
+	debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
+
+	debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
+			"offset: 0x%08x\n",
+			pvt->dhar,
+			dhar_base(pvt->dhar),
+			(boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar)
+						   : f10_dhar_offset(pvt->dhar));
+
+	debugf1("  DramHoleValid: %s\n",
+		(pvt->dhar & DHAR_VALID) ? "yes" : "no");
+
+	/* everything below this point is Fam10h and above */
 	if (boot_cpu_data.x86 == 0xf) {
-		debugf1("  dhar: 0x%8.08x Base=0x%08x Offset=0x%08x\n",
-			pvt->dhar, dhar_base(pvt->dhar),
-			k8_dhar_offset(pvt->dhar));
-		debugf1("      DramHoleValid=%s\n",
-			(pvt->dhar & DHAR_VALID) ?  "True" : "False");
-
-		debugf1("  dbam-dkt: 0x%8.08x\n", pvt->dbam0);
-
-		/* everything below this point is Fam10h and above */
+		amd64_debug_display_dimm_sizes(0, pvt);
 		return;
-
-	} else {
-		debugf1("  dhar: 0x%8.08x Base=0x%08x Offset=0x%08x\n",
-			pvt->dhar, dhar_base(pvt->dhar),
-			f10_dhar_offset(pvt->dhar));
-		debugf1("    DramMemHoistValid=%s DramHoleValid=%s\n",
-			(pvt->dhar & F10_DRAM_MEM_HOIST_VALID) ?
-			"True" : "False",
-			(pvt->dhar & DHAR_VALID) ?
-			"True" : "False");
 	}
 
-	/* Only if NOT ganged does dcl1 have valid info */
-	if (!dct_ganging_enabled(pvt)) {
-		debugf1("  DramCfg1-low=0x%08x DIMM-ECC=%s Parity=%s "
-			"Width=%s\n", pvt->dclr1,
-			(pvt->dclr1 & BIT(19)) ?  "Enabled" : "Disabled",
-			(pvt->dclr1 & BIT(8)) ?  "Enabled" : "Disabled",
-			(pvt->dclr1 & BIT(11)) ?  "128b" : "64b");
-		debugf1("    DIMM x4 Present: L0=%s L1=%s L2=%s L3=%s  "
-			"DIMM Type=%s\n",
-			(pvt->dclr1 & BIT(12)) ?  "Y" : "N",
-			(pvt->dclr1 & BIT(13)) ?  "Y" : "N",
-			(pvt->dclr1 & BIT(14)) ?  "Y" : "N",
-			(pvt->dclr1 & BIT(15)) ?  "Y" : "N",
-			(pvt->dclr1 & BIT(16)) ?  "UN-Buffered" : "Buffered");
-	}
+	/* Only if NOT ganged does dclr1 have valid info */
+	if (!dct_ganging_enabled(pvt))
+		amd64_dump_dramcfg_low(pvt->dclr1, 1);
 
 	/*
 	 * Determine if ganged and then dump memory sizes for first controller,
@@ -900,35 +896,19 @@
 	 */
 	ganged = dct_ganging_enabled(pvt);
 
-	f10_debug_display_dimm_sizes(0, pvt, ganged);
+	amd64_debug_display_dimm_sizes(0, pvt);
 
 	if (!ganged)
-		f10_debug_display_dimm_sizes(1, pvt, ganged);
+		amd64_debug_display_dimm_sizes(1, pvt);
 }
 
 /* Read in both of DBAM registers */
 static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
 {
-	int err = 0;
-	unsigned int reg;
+	amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM0, &pvt->dbam0);
 
-	reg = DBAM0;
-	err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dbam0);
-	if (err)
-		goto err_reg;
-
-	if (boot_cpu_data.x86 >= 0x10) {
-		reg = DBAM1;
-		err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dbam1);
-
-		if (err)
-			goto err_reg;
-	}
-
-	return;
-
-err_reg:
-	debugf0("Error reading F2x%03x.\n", reg);
+	if (boot_cpu_data.x86 >= 0x10)
+		amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM1, &pvt->dbam1);
 }
 
 /*
@@ -963,7 +943,7 @@
 static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
 {
 
-	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F) {
+	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
 		pvt->dcsb_base		= REV_E_DCSB_BASE_BITS;
 		pvt->dcsm_mask		= REV_E_DCSM_MASK_BITS;
 		pvt->dcs_mask_notused	= REV_E_DCS_NOTUSED_BITS;
@@ -991,28 +971,21 @@
  */
 static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
 {
-	int cs, reg, err = 0;
+	int cs, reg;
 
 	amd64_set_dct_base_and_mask(pvt);
 
 	for (cs = 0; cs < pvt->cs_count; cs++) {
 		reg = K8_DCSB0 + (cs * 4);
-		err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
-						&pvt->dcsb0[cs]);
-		if (unlikely(err))
-			debugf0("Reading K8_DCSB0[%d] failed\n", cs);
-		else
+		if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsb0[cs]))
 			debugf0("  DCSB0[%d]=0x%08x reg: F2x%x\n",
 				cs, pvt->dcsb0[cs], reg);
 
 		/* If DCT are NOT ganged, then read in DCT1's base */
 		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
 			reg = F10_DCSB1 + (cs * 4);
-			err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
-							&pvt->dcsb1[cs]);
-			if (unlikely(err))
-				debugf0("Reading F10_DCSB1[%d] failed\n", cs);
-			else
+			if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
+						&pvt->dcsb1[cs]))
 				debugf0("  DCSB1[%d]=0x%08x reg: F2x%x\n",
 					cs, pvt->dcsb1[cs], reg);
 		} else {
@@ -1022,26 +995,20 @@
 
 	for (cs = 0; cs < pvt->num_dcsm; cs++) {
 		reg = K8_DCSM0 + (cs * 4);
-		err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
-					&pvt->dcsm0[cs]);
-		if (unlikely(err))
-			debugf0("Reading K8_DCSM0 failed\n");
-		else
+		if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsm0[cs]))
 			debugf0("    DCSM0[%d]=0x%08x reg: F2x%x\n",
 				cs, pvt->dcsm0[cs], reg);
 
 		/* If DCT are NOT ganged, then read in DCT1's mask */
 		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
 			reg = F10_DCSM1 + (cs * 4);
-			err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
-					&pvt->dcsm1[cs]);
-			if (unlikely(err))
-				debugf0("Reading F10_DCSM1[%d] failed\n", cs);
-			else
+			if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
+						&pvt->dcsm1[cs]))
 				debugf0("    DCSM1[%d]=0x%08x reg: F2x%x\n",
 					cs, pvt->dcsm1[cs], reg);
-		} else
+		} else {
 			pvt->dcsm1[cs] = 0;
+		}
 	}
 }
 
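amd64_read_pci_cfg() wraps pci_read_config_dword() and centralizes the error message, returning 0 on success so the call sites above can keep their debug output in the success branch. The in-tree helper is a macro that passes __func__ along; a sketch of the idea:

	static int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
					    u32 *val, const char *func)
	{
		int err;

		err = pci_read_config_dword(pdev, offset, val);
		if (err)
			amd64_printk(KERN_WARNING, "%s: error reading F%dx%03x.\n",
				     func, PCI_FUNC(pdev->devfn), offset);
		return err;
	}

	#define amd64_read_pci_cfg(pdev, offset, val)	\
		amd64_read_pci_cfg_dword(pdev, offset, val, __func__)
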
@@ -1049,18 +1016,16 @@
 {
 	enum mem_type type;
 
-	if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= OPTERON_CPU_REV_F) {
-		/* Rev F and later */
-		type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
+	if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) {
+		if (pvt->dchr0 & DDR3_MODE)
+			type = (pvt->dclr0 & BIT(16)) ?	MEM_DDR3 : MEM_RDDR3;
+		else
+			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
 	} else {
-		/* Rev E and earlier */
 		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
 	}
 
-	debugf1("  Memory type is: %s\n",
-		(type == MEM_DDR2) ? "MEM_DDR2" :
-		(type == MEM_RDDR2) ? "MEM_RDDR2" :
-		(type == MEM_DDR) ? "MEM_DDR" : "MEM_RDDR");
+	debugf1("  Memory type is: %s\n", edac_mem_types[type]);
 
 	return type;
 }
@@ -1078,11 +1043,11 @@
 {
 	int flag, err = 0;
 
-	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
+	err = amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
 	if (err)
 		return err;
 
-	if ((boot_cpu_data.x86_model >> 4) >= OPTERON_CPU_REV_F) {
+	if ((boot_cpu_data.x86_model >> 4) >= K8_REV_F) {
 		/* RevF (NPT) and later */
 		flag = pvt->dclr0 & F10_WIDTH_128;
 	} else {
@@ -1114,22 +1079,15 @@
 {
 	u32 low;
 	u32 off = dram << 3;	/* 8 bytes between DRAM entries */
-	int err;
 
-	err = pci_read_config_dword(pvt->addr_f1_ctl,
-				    K8_DRAM_BASE_LOW + off, &low);
-	if (err)
-		debugf0("Reading K8_DRAM_BASE_LOW failed\n");
+	amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_BASE_LOW + off, &low);
 
 	/* Extract parts into separate data entries */
 	pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
 	pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
 	pvt->dram_rw_en[dram] = (low & 0x3);
 
-	err = pci_read_config_dword(pvt->addr_f1_ctl,
-				    K8_DRAM_LIMIT_LOW + off, &low);
-	if (err)
-		debugf0("Reading K8_DRAM_LIMIT_LOW failed\n");
+	amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_LIMIT_LOW + off, &low);
 
 	/*
 	 * Extract parts into separate data entries. Limit is the HIGHEST memory
@@ -1142,7 +1100,7 @@
 
 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
 					struct err_regs *info,
-					u64 SystemAddress)
+					u64 sys_addr)
 {
 	struct mem_ctl_info *src_mci;
 	unsigned short syndrome;
@@ -1155,7 +1113,7 @@
 
 	/* CHIPKILL enabled */
 	if (info->nbcfg & K8_NBCFG_CHIPKILL) {
-		channel = get_channel_from_ecc_syndrome(syndrome);
+		channel = get_channel_from_ecc_syndrome(mci, syndrome);
 		if (channel < 0) {
 			/*
 			 * Syndrome didn't map, so we don't know which of the
@@ -1177,64 +1135,46 @@
 		 * was obtained from email communication with someone at AMD.
 		 * (Wish the email was placed in this comment - norsk)
 		 */
-		channel = ((SystemAddress & BIT(3)) != 0);
+		channel = ((sys_addr & BIT(3)) != 0);
 	}
 
 	/*
 	 * Find out which node the error address belongs to. This may be
 	 * different from the node that detected the error.
 	 */
-	src_mci = find_mc_by_sys_addr(mci, SystemAddress);
+	src_mci = find_mc_by_sys_addr(mci, sys_addr);
 	if (!src_mci) {
 		amd64_mc_printk(mci, KERN_ERR,
 			     "failed to map error address 0x%lx to a node\n",
-			     (unsigned long)SystemAddress);
+			     (unsigned long)sys_addr);
 		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
 		return;
 	}
 
-	/* Now map the SystemAddress to a CSROW */
-	csrow = sys_addr_to_csrow(src_mci, SystemAddress);
+	/* Now map the sys_addr to a CSROW */
+	csrow = sys_addr_to_csrow(src_mci, sys_addr);
 	if (csrow < 0) {
 		edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
 	} else {
-		error_address_to_page_and_offset(SystemAddress, &page, &offset);
+		error_address_to_page_and_offset(sys_addr, &page, &offset);
 
 		edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
 				  channel, EDAC_MOD_STR);
 	}
 }
 
-/*
- * determrine the number of PAGES in for this DIMM's size based on its DRAM
- * Address Mapping.
- *
- * First step is to calc the number of bits to shift a value of 1 left to
- * indicate show many pages. Start with the DBAM value as the starting bits,
- * then proceed to adjust those shift bits, based on CPU rev and the table.
- * See BKDG on the DBAM
- */
-static int k8_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
+static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
 {
-	int nr_pages;
+	int *dbam_map;
 
-	if (pvt->ext_model >= OPTERON_CPU_REV_F) {
-		nr_pages = 1 << (revf_quad_ddr2_shift[dram_map] - PAGE_SHIFT);
-	} else {
-		/*
-		 * RevE and less section; this line is tricky. It collapses the
-		 * table used by RevD and later to one that matches revisions CG
-		 * and earlier.
-		 */
-		dram_map -= (pvt->ext_model >= OPTERON_CPU_REV_D) ?
-				(dram_map > 8 ? 4 : (dram_map > 5 ?
-				3 : (dram_map > 2 ? 1 : 0))) : 0;
+	if (pvt->ext_model >= K8_REV_F)
+		dbam_map = ddr2_dbam;
+	else if (pvt->ext_model >= K8_REV_D)
+		dbam_map = ddr2_dbam_revD;
+	else
+		dbam_map = ddr2_dbam_revCG;
 
-		/* 25 shift is 32MiB minimum DIMM size in RevE and prior */
-		nr_pages = 1 << (dram_map + 25 - PAGE_SHIFT);
-	}
-
-	return nr_pages;
+	return dbam_map[cs_mode];
 }
 
 /*
@@ -1248,34 +1188,24 @@
 static int f10_early_channel_count(struct amd64_pvt *pvt)
 {
 	int dbams[] = { DBAM0, DBAM1 };
-	int err = 0, channels = 0;
-	int i, j;
+	int i, j, channels = 0;
 	u32 dbam;
 
-	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
-	if (err)
-		goto err_reg;
-
-	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
-	if (err)
-		goto err_reg;
-
 	/* If we are in 128 bit mode, then we are using 2 channels */
 	if (pvt->dclr0 & F10_WIDTH_128) {
-		debugf0("Data WIDTH is 128 bits - 2 channels\n");
 		channels = 2;
 		return channels;
 	}
 
 	/*
-	 * Need to check if in UN-ganged mode: In such, there are 2 channels,
-	 * but they are NOT in 128 bit mode and thus the above 'dcl0' status bit
-	 * will be OFF.
+	 * Need to check if in unganged mode: In such, there are 2 channels,
+	 * but they are not in 128 bit mode and thus the above 'dclr0' status
+	 * bit will be OFF.
 	 *
 	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
 	 * their CSEnable bit on. If so, then SINGLE DIMM case.
 	 */
-	debugf0("Data WIDTH is NOT 128 bits - need more decoding\n");
+	debugf0("Data width is not 128 bits - need more decoding\n");
 
 	/*
 	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
@@ -1283,8 +1213,7 @@
 	 * both controllers since DIMMs can be placed in either one.
 	 */
 	for (i = 0; i < ARRAY_SIZE(dbams); i++) {
-		err = pci_read_config_dword(pvt->dram_f2_ctl, dbams[i], &dbam);
-		if (err)
+		if (amd64_read_pci_cfg(pvt->dram_f2_ctl, dbams[i], &dbam))
 			goto err_reg;
 
 		for (j = 0; j < 4; j++) {
@@ -1295,6 +1224,9 @@
 		}
 	}
 
+	if (channels > 2)
+		channels = 2;
+
 	debugf0("MCT channel count: %d\n", channels);
 
 	return channels;
@@ -1304,9 +1236,16 @@
 
 }
 
-static int f10_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
+static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
 {
-	return 1 << (revf_quad_ddr2_shift[dram_map] - PAGE_SHIFT);
+	int *dbam_map;
+
+	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
+		dbam_map = ddr3_dbam;
+	else
+		dbam_map = ddr2_dbam;
+
+	return dbam_map[cs_mode];
 }
 
 /* Enable extended configuration access via 0xCF8 feature */
@@ -1314,7 +1253,7 @@
 {
 	u32 reg;
 
-	pci_read_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
+	amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
 
 	pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG);
 	reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
@@ -1326,7 +1265,7 @@
 {
 	u32 reg;
 
-	pci_read_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
+	amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
 
 	reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG;
 	if (pvt->flags.cf8_extcfg)
@@ -1355,10 +1294,10 @@
 	high_offset = F10_DRAM_BASE_HIGH + (dram << 3);
 
 	/* read the 'raw' DRAM BASE Address register */
-	pci_read_config_dword(pvt->addr_f1_ctl, low_offset, &low_base);
+	amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_base);
 
 	/* Read from the ECS data register */
-	pci_read_config_dword(pvt->addr_f1_ctl, high_offset, &high_base);
+	amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_base);
 
 	/* Extract parts into separate data entries */
 	pvt->dram_rw_en[dram] = (low_base & 0x3);
@@ -1375,13 +1314,10 @@
 	high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
 
 	/* read the 'raw' LIMIT registers */
-	pci_read_config_dword(pvt->addr_f1_ctl, low_offset, &low_limit);
+	amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_limit);
 
 	/* Read from the ECS data register for the HIGH portion */
-	pci_read_config_dword(pvt->addr_f1_ctl, high_offset, &high_limit);
-
-	debugf0("  HW Regs: BASE=0x%08x-%08x      LIMIT=  0x%08x-%08x\n",
-		high_base, low_base, high_limit, low_limit);
+	amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_limit);
 
 	pvt->dram_DstNode[dram] = (low_limit & 0x7);
 	pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
@@ -1397,32 +1333,35 @@
 
 static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
 {
-	int err = 0;
 
-	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW,
-				    &pvt->dram_ctl_select_low);
-	if (err) {
-		debugf0("Reading F10_DCTL_SEL_LOW failed\n");
-	} else {
-		debugf0("DRAM_DCTL_SEL_LOW=0x%x  DctSelBaseAddr=0x%x\n",
-			pvt->dram_ctl_select_low, dct_sel_baseaddr(pvt));
+	if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW,
+				&pvt->dram_ctl_select_low)) {
+		debugf0("F2x110 (DCTL Sel. Low): 0x%08x, "
+			"High range addresses at: 0x%x\n",
+			pvt->dram_ctl_select_low,
+			dct_sel_baseaddr(pvt));
 
-		debugf0("  DRAM DCTs are=%s DRAM Is=%s DRAM-Ctl-"
-				"sel-hi-range=%s\n",
-			(dct_ganging_enabled(pvt) ? "GANGED" : "NOT GANGED"),
-			(dct_dram_enabled(pvt) ? "Enabled"   : "Disabled"),
-			(dct_high_range_enabled(pvt) ? "Enabled" : "Disabled"));
+		debugf0("  DCT mode: %s, All DCTs on: %s\n",
+			(dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
+			(dct_dram_enabled(pvt) ? "yes"   : "no"));
 
-		debugf0("  DctDatIntLv=%s MemCleared=%s DctSelIntLvAddr=0x%x\n",
-			(dct_data_intlv_enabled(pvt) ? "Enabled" : "Disabled"),
-			(dct_memory_cleared(pvt) ? "True " : "False "),
+		if (!dct_ganging_enabled(pvt))
+			debugf0("  Address range split per DCT: %s\n",
+				(dct_high_range_enabled(pvt) ? "yes" : "no"));
+
+		debugf0("  DCT data interleave for ECC: %s, "
+			"DRAM cleared since last warm reset: %s\n",
+			(dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
+			(dct_memory_cleared(pvt) ? "yes" : "no"));
+
+		debugf0("  DCT channel interleave: %s, "
+			"DCT interleave bits selector: 0x%x\n",
+			(dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
 			dct_sel_interleave_addr(pvt));
 	}
 
-	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH,
-				    &pvt->dram_ctl_select_high);
-	if (err)
-		debugf0("Reading F10_DCTL_SEL_HIGH failed\n");
+	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH,
+			   &pvt->dram_ctl_select_high);
 }
 
 /*
@@ -1706,10 +1645,11 @@
 }
 
 /*
- * This the F10h reference code from AMD to map a @sys_addr to NodeID,
- * CSROW, Channel.
+ * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
+ * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
  *
- * The @sys_addr is usually an error address received from the hardware.
+ * The @sys_addr is usually an error address received from the hardware
+ * (MCX_ADDR).
  */
 static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
 				     struct err_regs *info,
@@ -1722,136 +1662,79 @@
 
 	csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
 
-	if (csrow >= 0) {
-		error_address_to_page_and_offset(sys_addr, &page, &offset);
-
-		syndrome  = HIGH_SYNDROME(info->nbsl) << 8;
-		syndrome |= LOW_SYNDROME(info->nbsh);
-
-		/*
-		 * Is CHIPKILL on? If so, then we can attempt to use the
-		 * syndrome to isolate which channel the error was on.
-		 */
-		if (pvt->nbcfg & K8_NBCFG_CHIPKILL)
-			chan = get_channel_from_ecc_syndrome(syndrome);
-
-		if (chan >= 0) {
-			edac_mc_handle_ce(mci, page, offset, syndrome,
-					csrow, chan, EDAC_MOD_STR);
-		} else {
-			/*
-			 * Channel unknown, report all channels on this
-			 * CSROW as failed.
-			 */
-			for (chan = 0; chan < mci->csrows[csrow].nr_channels;
-								chan++) {
-					edac_mc_handle_ce(mci, page, offset,
-							syndrome,
-							csrow, chan,
-							EDAC_MOD_STR);
-			}
-		}
-
-	} else {
+	if (csrow < 0) {
 		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+		return;
 	}
+
+	error_address_to_page_and_offset(sys_addr, &page, &offset);
+
+	syndrome  = HIGH_SYNDROME(info->nbsl) << 8;
+	syndrome |= LOW_SYNDROME(info->nbsh);
+
+	/*
+	 * We need the syndromes for channel detection only when we're
+	 * ganged. Otherwise @chan should already contain the channel at
+	 * this point.
+	 */
+	if (dct_ganging_enabled(pvt) && pvt->nbcfg & K8_NBCFG_CHIPKILL)
+		chan = get_channel_from_ecc_syndrome(mci, syndrome);
+
+	if (chan >= 0)
+		edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,
+				  EDAC_MOD_STR);
+	else
+		/*
+		 * Channel unknown, report all channels on this CSROW as failed.
+		 */
+		for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
+			edac_mc_handle_ce(mci, page, offset, syndrome,
+					  csrow, chan, EDAC_MOD_STR);
 }
 
 /*
- * Input (@index) is the DBAM DIMM value (1 of 4) used as an index into a shift
- * table (revf_quad_ddr2_shift) which starts at 128MB DIMM size. Index of 0
- * indicates an empty DIMM slot, as reported by Hardware on empty slots.
- *
- * Normalize to 128MB by subracting 27 bit shift.
- */
-static int map_dbam_to_csrow_size(int index)
-{
-	int mega_bytes = 0;
-
-	if (index > 0 && index <= DBAM_MAX_VALUE)
-		mega_bytes = ((128 << (revf_quad_ddr2_shift[index]-27)));
-
-	return mega_bytes;
-}
-
-/*
- * debug routine to display the memory sizes of a DIMM (ganged or not) and it
+ * debug routine to display the memory sizes of all logical DIMMs and its
  * CSROWs as well
  */
-static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
-					 int ganged)
+static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
 {
 	int dimm, size0, size1;
 	u32 dbam;
 	u32 *dcsb;
 
-	debugf1("  dbam%d: 0x%8.08x  CSROW is %s\n", ctrl,
-			ctrl ? pvt->dbam1 : pvt->dbam0,
-			ganged ? "GANGED - dbam1 not used" : "NON-GANGED");
+	if (boot_cpu_data.x86 == 0xf) {
+		/* K8 families < revF not supported yet */
+		if (pvt->ext_model < K8_REV_F)
+			return;
+		else
+			WARN_ON(ctrl != 0);
+	}
+
+	debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
+		ctrl, ctrl ? pvt->dbam1 : pvt->dbam0);
 
 	dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
 	dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
 
+	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
+
 	/* Dump memory sizes for DIMM and its CSROWs */
 	for (dimm = 0; dimm < 4; dimm++) {
 
 		size0 = 0;
 		if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
-			size0 = map_dbam_to_csrow_size(DBAM_DIMM(dimm, dbam));
+			size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
 
 		size1 = 0;
 		if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
-			size1 = map_dbam_to_csrow_size(DBAM_DIMM(dimm, dbam));
+			size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
 
-		debugf1("     CTRL-%d DIMM-%d=%5dMB   CSROW-%d=%5dMB "
-				"CSROW-%d=%5dMB\n",
-				ctrl,
-				dimm,
-				size0 + size1,
-				dimm * 2,
-				size0,
-				dimm * 2 + 1,
-				size1);
+		edac_printk(KERN_DEBUG, EDAC_MC, " %d: %5dMB %d: %5dMB\n",
+			    dimm * 2, size0, dimm * 2 + 1, size1);
 	}
 }
 
 /*
- * Very early hardware probe on pci_probe thread to determine if this module
- * supports the hardware.
- *
- * Return:
- *      0 for OK
- *      1 for error
- */
-static int f10_probe_valid_hardware(struct amd64_pvt *pvt)
-{
-	int ret = 0;
-
-	/*
-	 * If we are on a DDR3 machine, we don't know yet if
-	 * we support that properly at this time
-	 */
-	if ((pvt->dchr0 & F10_DCHR_Ddr3Mode) ||
-	    (pvt->dchr1 & F10_DCHR_Ddr3Mode)) {
-
-		amd64_printk(KERN_WARNING,
-			"%s() This machine is running with DDR3 memory. "
-			"This is not currently supported. "
-			"DCHR0=0x%x DCHR1=0x%x\n",
-			__func__, pvt->dchr0, pvt->dchr1);
-
-		amd64_printk(KERN_WARNING,
-			"   Contact '%s' module MAINTAINER to help add"
-			" support.\n",
-			EDAC_MOD_STR);
-
-		ret = 1;
-
-	}
-	return ret;
-}
-
-/*
  * There currently are 3 types type of MC devices for AMD Athlon/Opterons
  * (as per PCI DEVICE_IDs):
  *
@@ -1868,11 +1751,11 @@
 		.addr_f1_ctl = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
 		.misc_f3_ctl = PCI_DEVICE_ID_AMD_K8_NB_MISC,
 		.ops = {
-			.early_channel_count = k8_early_channel_count,
-			.get_error_address = k8_get_error_address,
-			.read_dram_base_limit = k8_read_dram_base_limit,
-			.map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
-			.dbam_map_to_pages = k8_dbam_map_to_pages,
+			.early_channel_count	= k8_early_channel_count,
+			.get_error_address	= k8_get_error_address,
+			.read_dram_base_limit	= k8_read_dram_base_limit,
+			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
+			.dbam_to_cs		= k8_dbam_to_chip_select,
 		}
 	},
 	[F10_CPUS] = {
@@ -1880,13 +1763,12 @@
 		.addr_f1_ctl = PCI_DEVICE_ID_AMD_10H_NB_MAP,
 		.misc_f3_ctl = PCI_DEVICE_ID_AMD_10H_NB_MISC,
 		.ops = {
-			.probe_valid_hardware = f10_probe_valid_hardware,
-			.early_channel_count = f10_early_channel_count,
-			.get_error_address = f10_get_error_address,
-			.read_dram_base_limit = f10_read_dram_base_limit,
-			.read_dram_ctl_register = f10_read_dram_ctl_register,
-			.map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
-			.dbam_map_to_pages = f10_dbam_map_to_pages,
+			.early_channel_count	= f10_early_channel_count,
+			.get_error_address	= f10_get_error_address,
+			.read_dram_base_limit	= f10_read_dram_base_limit,
+			.read_dram_ctl_register	= f10_read_dram_ctl_register,
+			.map_sysaddr_to_csrow	= f10_map_sysaddr_to_csrow,
+			.dbam_to_cs		= f10_dbam_to_chip_select,
 		}
 	},
 	[F11_CPUS] = {
@@ -1894,13 +1776,12 @@
 		.addr_f1_ctl = PCI_DEVICE_ID_AMD_11H_NB_MAP,
 		.misc_f3_ctl = PCI_DEVICE_ID_AMD_11H_NB_MISC,
 		.ops = {
-			.probe_valid_hardware = f10_probe_valid_hardware,
-			.early_channel_count = f10_early_channel_count,
-			.get_error_address = f10_get_error_address,
-			.read_dram_base_limit = f10_read_dram_base_limit,
-			.read_dram_ctl_register = f10_read_dram_ctl_register,
-			.map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
-			.dbam_map_to_pages = f10_dbam_map_to_pages,
+			.early_channel_count	= f10_early_channel_count,
+			.get_error_address	= f10_get_error_address,
+			.read_dram_base_limit	= f10_read_dram_base_limit,
+			.read_dram_ctl_register	= f10_read_dram_ctl_register,
+			.map_sysaddr_to_csrow	= f10_map_sysaddr_to_csrow,
+			.dbam_to_cs		= f10_dbam_to_chip_select,
 		}
 	},
 };
@@ -1923,142 +1804,170 @@
 }
 
 /*
- * syndrome mapping table for ECC ChipKill devices
+ * These are tables of eigenvectors (one per line) which can be used for the
+ * construction of the syndrome tables. The modified syndrome search algorithm
+ * uses those to find the symbol in error and thus the DIMM.
  *
- * The comment in each row is the token (nibble) number that is in error.
- * The least significant nibble of the syndrome is the mask for the bits
- * that are in error (need to be toggled) for the particular nibble.
- *
- * Each row contains 16 entries.
- * The first entry (0th) is the channel number for that row of syndromes.
- * The remaining 15 entries are the syndromes for the respective Error
- * bit mask index.
- *
- * 1st index entry is 0x0001 mask, indicating that the rightmost bit is the
- * bit in error.
- * The 2nd index entry is 0x0010 that the second bit is damaged.
- * The 3rd index entry is 0x0011 indicating that the rightmost 2 bits
- * are damaged.
- * Thus so on until index 15, 0x1111, whose entry has the syndrome
- * indicating that all 4 bits are damaged.
- *
- * A search is performed on this table looking for a given syndrome.
- *
- * See the AMD documentation for ECC syndromes. This ECC table is valid
- * across all the versions of the AMD64 processors.
- *
- * A fast lookup is to use the LAST four bits of the 16-bit syndrome as a
- * COLUMN index, then search all ROWS of that column, looking for a match
- * with the input syndrome. The ROW value will be the token number.
- *
- * The 0'th entry on that row, can be returned as the CHANNEL (0 or 1) of this
- * error.
+ * Algorithm courtesy of Ross LaFetra from AMD.
  */
-#define NUMBER_ECC_ROWS  36
-static const unsigned short ecc_chipkill_syndromes[NUMBER_ECC_ROWS][16] = {
-	/* Channel 0 syndromes */
-	{/*0*/  0, 0xe821, 0x7c32, 0x9413, 0xbb44, 0x5365, 0xc776, 0x2f57,
-	   0xdd88, 0x35a9, 0xa1ba, 0x499b, 0x66cc, 0x8eed, 0x1afe, 0xf2df },
-	{/*1*/  0, 0x5d31, 0xa612, 0xfb23, 0x9584, 0xc8b5, 0x3396, 0x6ea7,
-	   0xeac8, 0xb7f9, 0x4cda, 0x11eb, 0x7f4c, 0x227d, 0xd95e, 0x846f },
-	{/*2*/  0, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
-	   0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f },
-	{/*3*/  0, 0x2021, 0x3032, 0x1013, 0x4044, 0x6065, 0x7076, 0x5057,
-	   0x8088, 0xa0a9, 0xb0ba, 0x909b, 0xc0cc, 0xe0ed, 0xf0fe, 0xd0df },
-	{/*4*/  0, 0x5041, 0xa082, 0xf0c3, 0x9054, 0xc015, 0x30d6, 0x6097,
-	   0xe0a8, 0xb0e9, 0x402a, 0x106b, 0x70fc, 0x20bd, 0xd07e, 0x803f },
-	{/*5*/  0, 0xbe21, 0xd732, 0x6913, 0x2144, 0x9f65, 0xf676, 0x4857,
-	   0x3288, 0x8ca9, 0xe5ba, 0x5b9b, 0x13cc, 0xaded, 0xc4fe, 0x7adf },
-	{/*6*/  0, 0x4951, 0x8ea2, 0xc7f3, 0x5394, 0x1ac5, 0xdd36, 0x9467,
-	   0xa1e8, 0xe8b9, 0x2f4a, 0x661b, 0xf27c, 0xbb2d, 0x7cde, 0x358f },
-	{/*7*/  0, 0x74e1, 0x9872, 0xec93, 0xd6b4, 0xa255, 0x4ec6, 0x3a27,
-	   0x6bd8, 0x1f39, 0xf3aa, 0x874b, 0xbd6c, 0xc98d, 0x251e, 0x51ff },
-	{/*8*/  0, 0x15c1, 0x2a42, 0x3f83, 0xcef4, 0xdb35, 0xe4b6, 0xf177,
-	   0x4758, 0x5299, 0x6d1a, 0x78db, 0x89ac, 0x9c6d, 0xa3ee, 0xb62f },
-	{/*9*/  0, 0x3d01, 0x1602, 0x2b03, 0x8504, 0xb805, 0x9306, 0xae07,
-	   0xca08, 0xf709, 0xdc0a, 0xe10b, 0x4f0c, 0x720d, 0x590e, 0x640f },
-	{/*a*/  0, 0x9801, 0xec02, 0x7403, 0x6b04, 0xf305, 0x8706, 0x1f07,
-	   0xbd08, 0x2509, 0x510a, 0xc90b, 0xd60c, 0x4e0d, 0x3a0e, 0xa20f },
-	{/*b*/  0, 0xd131, 0x6212, 0xb323, 0x3884, 0xe9b5, 0x5a96, 0x8ba7,
-	   0x1cc8, 0xcdf9, 0x7eda, 0xafeb, 0x244c, 0xf57d, 0x465e, 0x976f },
-	{/*c*/  0, 0xe1d1, 0x7262, 0x93b3, 0xb834, 0x59e5, 0xca56, 0x2b87,
-	   0xdc18, 0x3dc9, 0xae7a, 0x4fab, 0x542c, 0x85fd, 0x164e, 0xf79f },
-	{/*d*/  0, 0x6051, 0xb0a2, 0xd0f3, 0x1094, 0x70c5, 0xa036, 0xc067,
-	   0x20e8, 0x40b9, 0x904a, 0x601b, 0x307c, 0x502d, 0x80de, 0xe08f },
-	{/*e*/  0, 0xa4c1, 0xf842, 0x5c83, 0xe6f4, 0x4235, 0x1eb6, 0xba77,
-	   0x7b58, 0xdf99, 0x831a, 0x27db, 0x9dac, 0x396d, 0x65ee, 0xc12f },
-	{/*f*/  0, 0x11c1, 0x2242, 0x3383, 0xc8f4, 0xd935, 0xeab6, 0xfb77,
-	   0x4c58, 0x5d99, 0x6e1a, 0x7fdb, 0x84ac, 0x956d, 0xa6ee, 0xb72f },
-
-	/* Channel 1 syndromes */
-	{/*10*/ 1, 0x45d1, 0x8a62, 0xcfb3, 0x5e34, 0x1be5, 0xd456, 0x9187,
-	   0xa718, 0xe2c9, 0x2d7a, 0x68ab, 0xf92c, 0xbcfd, 0x734e, 0x369f },
-	{/*11*/ 1, 0x63e1, 0xb172, 0xd293, 0x14b4, 0x7755, 0xa5c6, 0xc627,
-	   0x28d8, 0x4b39, 0x99aa, 0xfa4b, 0x3c6c, 0x5f8d, 0x8d1e, 0xeeff },
-	{/*12*/ 1, 0xb741, 0xd982, 0x6ec3, 0x2254, 0x9515, 0xfbd6, 0x4c97,
-	   0x33a8, 0x84e9, 0xea2a, 0x5d6b, 0x11fc, 0xa6bd, 0xc87e, 0x7f3f },
-	{/*13*/ 1, 0xdd41, 0x6682, 0xbbc3, 0x3554, 0xe815, 0x53d6, 0xce97,
-	   0x1aa8, 0xc7e9, 0x7c2a, 0xa1fb, 0x2ffc, 0xf2bd, 0x497e, 0x943f },
-	{/*14*/ 1, 0x2bd1, 0x3d62, 0x16b3, 0x4f34, 0x64e5, 0x7256, 0x5987,
-	   0x8518, 0xaec9, 0xb87a, 0x93ab, 0xca2c, 0xe1fd, 0xf74e, 0xdc9f },
-	{/*15*/ 1, 0x83c1, 0xc142, 0x4283, 0xa4f4, 0x2735, 0x65b6, 0xe677,
-	   0xf858, 0x7b99, 0x391a, 0xbadb, 0x5cac, 0xdf6d, 0x9dee, 0x1e2f },
-	{/*16*/ 1, 0x8fd1, 0xc562, 0x4ab3, 0xa934, 0x26e5, 0x6c56, 0xe387,
-	   0xfe18, 0x71c9, 0x3b7a, 0xb4ab, 0x572c, 0xd8fd, 0x924e, 0x1d9f },
-	{/*17*/ 1, 0x4791, 0x89e2, 0xce73, 0x5264, 0x15f5, 0xdb86, 0x9c17,
-	   0xa3b8, 0xe429, 0x2a5a, 0x6dcb, 0xf1dc, 0xb64d, 0x783e, 0x3faf },
-	{/*18*/ 1, 0x5781, 0xa9c2, 0xfe43, 0x92a4, 0xc525, 0x3b66, 0x6ce7,
-	   0xe3f8, 0xb479, 0x4a3a, 0x1dbb, 0x715c, 0x26dd, 0xd89e, 0x8f1f },
-	{/*19*/ 1, 0xbf41, 0xd582, 0x6ac3, 0x2954, 0x9615, 0xfcd6, 0x4397,
-	   0x3ea8, 0x81e9, 0xeb2a, 0x546b, 0x17fc, 0xa8bd, 0xc27e, 0x7d3f },
-	{/*1a*/ 1, 0x9891, 0xe1e2, 0x7273, 0x6464, 0xf7f5, 0x8586, 0x1617,
-	   0xb8b8, 0x2b29, 0x595a, 0xcacb, 0xdcdc, 0x4f4d, 0x3d3e, 0xaeaf },
-	{/*1b*/ 1, 0xcce1, 0x4472, 0x8893, 0xfdb4, 0x3f55, 0xb9c6, 0x7527,
-	   0x56d8, 0x9a39, 0x12aa, 0xde4b, 0xab6c, 0x678d, 0xef1e, 0x23ff },
-	{/*1c*/ 1, 0xa761, 0xf9b2, 0x5ed3, 0xe214, 0x4575, 0x1ba6, 0xbcc7,
-	   0x7328, 0xd449, 0x8a9a, 0x2dfb, 0x913c, 0x365d, 0x688e, 0xcfef },
-	{/*1d*/ 1, 0xff61, 0x55b2, 0xaad3, 0x7914, 0x8675, 0x2ca6, 0xd3c7,
-	   0x9e28, 0x6149, 0xcb9a, 0x34fb, 0xe73c, 0x185d, 0xb28e, 0x4def },
-	{/*1e*/ 1, 0x5451, 0xa8a2, 0xfcf3, 0x9694, 0xc2c5, 0x3e36, 0x6a67,
-	   0xebe8, 0xbfb9, 0x434a, 0x171b, 0x7d7c, 0x292d, 0xd5de, 0x818f },
-	{/*1f*/ 1, 0x6fc1, 0xb542, 0xda83, 0x19f4, 0x7635, 0xacb6, 0xc377,
-	   0x2e58, 0x4199, 0x9b1a, 0xf4db, 0x37ac, 0x586d, 0x82ee, 0xed2f },
-
-	/* ECC bits are also in the set of tokens and they too can go bad
-	 * first 2 cover channel 0, while the second 2 cover channel 1
-	 */
-	{/*20*/ 0, 0xbe01, 0xd702, 0x6903, 0x2104, 0x9f05, 0xf606, 0x4807,
-	   0x3208, 0x8c09, 0xe50a, 0x5b0b, 0x130c, 0xad0d, 0xc40e, 0x7a0f },
-	{/*21*/ 0, 0x4101, 0x8202, 0xc303, 0x5804, 0x1905, 0xda06, 0x9b07,
-	   0xac08, 0xed09, 0x2e0a, 0x6f0b, 0x640c, 0xb50d, 0x760e, 0x370f },
-	{/*22*/ 1, 0xc441, 0x4882, 0x8cc3, 0xf654, 0x3215, 0xbed6, 0x7a97,
-	   0x5ba8, 0x9fe9, 0x132a, 0xd76b, 0xadfc, 0x69bd, 0xe57e, 0x213f },
-	{/*23*/ 1, 0x7621, 0x9b32, 0xed13, 0xda44, 0xac65, 0x4176, 0x3757,
-	   0x6f88, 0x19a9, 0xf4ba, 0x829b, 0xb5cc, 0xc3ed, 0x2efe, 0x58df }
+static u16 x4_vectors[] = {
+	0x2f57, 0x1afe, 0x66cc, 0xdd88,
+	0x11eb, 0x3396, 0x7f4c, 0xeac8,
+	0x0001, 0x0002, 0x0004, 0x0008,
+	0x1013, 0x3032, 0x4044, 0x8088,
+	0x106b, 0x30d6, 0x70fc, 0xe0a8,
+	0x4857, 0xc4fe, 0x13cc, 0x3288,
+	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
+	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
+	0x15c1, 0x2a42, 0x89ac, 0x4758,
+	0x2b03, 0x1602, 0x4f0c, 0xca08,
+	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
+	0x8ba7, 0x465e, 0x244c, 0x1cc8,
+	0x2b87, 0x164e, 0x642c, 0xdc18,
+	0x40b9, 0x80de, 0x1094, 0x20e8,
+	0x27db, 0x1eb6, 0x9dac, 0x7b58,
+	0x11c1, 0x2242, 0x84ac, 0x4c58,
+	0x1be5, 0x2d7a, 0x5e34, 0xa718,
+	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
+	0x4c97, 0xc87e, 0x11fc, 0x33a8,
+	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
+	0x16b3, 0x3d62, 0x4f34, 0x8518,
+	0x1e2f, 0x391a, 0x5cac, 0xf858,
+	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
+	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
+	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
+	0x4397, 0xc27e, 0x17fc, 0x3ea8,
+	0x1617, 0x3d3e, 0x6464, 0xb8b8,
+	0x23ff, 0x12aa, 0xab6c, 0x56d8,
+	0x2dfb, 0x1ba6, 0x913c, 0x7328,
+	0x185d, 0x2ca6, 0x7914, 0x9e28,
+	0x171b, 0x3e36, 0x7d7c, 0xebe8,
+	0x4199, 0x82ee, 0x19f4, 0x2e58,
+	0x4807, 0xc40e, 0x130c, 0x3208,
+	0x1905, 0x2e0a, 0x5804, 0xac08,
+	0x213f, 0x132a, 0xadfc, 0x5ba8,
+	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
 };
 
-/*
- * Given the syndrome argument, scan each of the channel tables for a syndrome
- * match. Depending on which table it is found, return the channel number.
- */
-static int get_channel_from_ecc_syndrome(unsigned short syndrome)
+static u16 x8_vectors[] = {
+	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
+	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
+	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
+	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
+	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
+	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
+	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
+	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
+	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
+	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
+	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
+	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
+	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
+	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
+	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
+	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
+	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
+	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
+	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
+};
+
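+/*
+ * Sketch of the search below: for each error symbol, try to cancel the
+ * syndrome by XOR-ing in that symbol's eigenvectors while scanning the 16
+ * syndrome bits from LSB to MSB; if the syndrome reduces to zero, the
+ * current symbol is the one in error.  For example (x4 case), syndrome
+ * 0x0001 survives symbols 0 and 1 but is cancelled by the first
+ * eigenvector (0x0001) of symbol 2, so err_sym = 2, which
+ * map_err_sym_to_channel() then maps to channel 0.
+ */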
+static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs,
+				 int v_dim)
 {
-	int row;
-	int column;
+	unsigned int i, err_sym;
 
-	/* Determine column to scan */
-	column = syndrome & 0xF;
+	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
+		u16 s = syndrome;
+		int v_idx = err_sym * v_dim;
+		int v_end = (err_sym + 1) * v_dim;
 
-	/* Scan all rows, looking for syndrome, or end of table */
-	for (row = 0; row < NUMBER_ECC_ROWS; row++) {
-		if (ecc_chipkill_syndromes[row][column] == syndrome)
-			return ecc_chipkill_syndromes[row][0];
+		/* walk over all 16 bits of the syndrome */
+		for (i = 1; i < (1U << 16); i <<= 1) {
+
+			/* if bit is set in that eigenvector... */
+			if (v_idx < v_end && vectors[v_idx] & i) {
+				u16 ev_comp = vectors[v_idx++];
+
+				/* ... and bit set in the modified syndrome, */
+				if (s & i) {
+					/* remove it. */
+					s ^= ev_comp;
+
+					if (!s)
+						return err_sym;
+				}
+
+			} else if (s & i)
+				/* can't get to zero, move to next symbol */
+				break;
+		}
 	}
 
 	debugf0("syndrome(%x) not found\n", syndrome);
 	return -1;
 }
 
+static int map_err_sym_to_channel(int err_sym, int sym_size)
+{
+	if (sym_size == 4)
+		switch (err_sym) {
+		case 0x20:
+		case 0x21:
+			return 0;
+		case 0x22:
+		case 0x23:
+			return 1;
+		default:
+			return err_sym >> 4;
+		}
+	/* x8 symbols */
+	else
+		switch (err_sym) {
+		/* imaginary bits not in a DIMM */
+		case 0x10:
+			WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
+					  err_sym);
+			return -1;
+		case 0x11:
+			return 0;
+		case 0x12:
+			return 1;
+		default:
+			return err_sym >> 3;
+		}
+	return -1;
+}
+
+static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
+{
+	struct amd64_pvt *pvt = mci->pvt_info;
+	u32 value = 0;
+	int err_sym = 0;
+
+	amd64_read_pci_cfg(pvt->misc_f3_ctl, 0x180, &value);
+
+	/* F3x180[EccSymbolSize]=1, x8 symbols */
+	if (boot_cpu_data.x86 == 0x10 &&
+	    boot_cpu_data.x86_model > 7 &&
+	    value & BIT(25)) {
+		err_sym = decode_syndrome(syndrome, x8_vectors,
+					  ARRAY_SIZE(x8_vectors), 8);
+		return map_err_sym_to_channel(err_sym, 8);
+	} else {
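+		/* x4 ECC symbols on all other parts */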
+		err_sym = decode_syndrome(syndrome, x4_vectors,
+					  ARRAY_SIZE(x4_vectors), 4);
+		return map_err_sym_to_channel(err_sym, 4);
+	}
+}
+
 /*
  * Check for valid error in the NB Status High register. If so, proceed to read
  * NB Status Low, NB Address Low and NB Address High registers and store data
@@ -2073,40 +1982,24 @@
 {
 	struct amd64_pvt *pvt;
 	struct pci_dev *misc_f3_ctl;
-	int err = 0;
 
 	pvt = mci->pvt_info;
 	misc_f3_ctl = pvt->misc_f3_ctl;
 
-	err = pci_read_config_dword(misc_f3_ctl, K8_NBSH, &regs->nbsh);
-	if (err)
-		goto err_reg;
+	if (amd64_read_pci_cfg(misc_f3_ctl, K8_NBSH, &regs->nbsh))
+		return 0;
 
 	if (!(regs->nbsh & K8_NBSH_VALID_BIT))
 		return 0;
 
 	/* valid error, read remaining error information registers */
-	err = pci_read_config_dword(misc_f3_ctl, K8_NBSL, &regs->nbsl);
-	if (err)
-		goto err_reg;
-
-	err = pci_read_config_dword(misc_f3_ctl, K8_NBEAL, &regs->nbeal);
-	if (err)
-		goto err_reg;
-
-	err = pci_read_config_dword(misc_f3_ctl, K8_NBEAH, &regs->nbeah);
-	if (err)
-		goto err_reg;
-
-	err = pci_read_config_dword(misc_f3_ctl, K8_NBCFG, &regs->nbcfg);
-	if (err)
-		goto err_reg;
+	if (amd64_read_pci_cfg(misc_f3_ctl, K8_NBSL, &regs->nbsl) ||
+	    amd64_read_pci_cfg(misc_f3_ctl, K8_NBEAL, &regs->nbeal) ||
+	    amd64_read_pci_cfg(misc_f3_ctl, K8_NBEAH, &regs->nbeah) ||
+	    amd64_read_pci_cfg(misc_f3_ctl, K8_NBCFG, &regs->nbcfg))
+		return 0;
 
 	return 1;
-
-err_reg:
-	debugf0("Reading error info register failed\n");
-	return 0;
 }
 
 /*
@@ -2184,7 +2077,7 @@
 			    struct err_regs *info)
 {
 	struct amd64_pvt *pvt = mci->pvt_info;
-	u64 SystemAddress;
+	u64 sys_addr;
 
 	/* Ensure that the Error Address is VALID */
 	if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
@@ -2194,22 +2087,23 @@
 		return;
 	}
 
-	SystemAddress = extract_error_address(mci, info);
+	sys_addr = pvt->ops->get_error_address(mci, info);
 
 	amd64_mc_printk(mci, KERN_ERR,
-		"CE ERROR_ADDRESS= 0x%llx\n", SystemAddress);
+		"CE ERROR_ADDRESS= 0x%llx\n", sys_addr);
 
-	pvt->ops->map_sysaddr_to_csrow(mci, info, SystemAddress);
+	pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr);
 }
 
 /* Handle any Un-correctable Errors (UEs) */
 static void amd64_handle_ue(struct mem_ctl_info *mci,
 			    struct err_regs *info)
 {
-	int csrow;
-	u64 SystemAddress;
-	u32 page, offset;
+	struct amd64_pvt *pvt = mci->pvt_info;
 	struct mem_ctl_info *log_mci, *src_mci = NULL;
+	int csrow;
+	u64 sys_addr;
+	u32 page, offset;
 
 	log_mci = mci;
 
@@ -2220,31 +2114,31 @@
 		return;
 	}
 
-	SystemAddress = extract_error_address(mci, info);
+	sys_addr = pvt->ops->get_error_address(mci, info);
 
 	/*
 	 * Find out which node the error address belongs to. This may be
 	 * different from the node that detected the error.
 	 */
-	src_mci = find_mc_by_sys_addr(mci, SystemAddress);
+	src_mci = find_mc_by_sys_addr(mci, sys_addr);
 	if (!src_mci) {
 		amd64_mc_printk(mci, KERN_CRIT,
 			"ERROR ADDRESS (0x%lx) value NOT mapped to a MC\n",
-			(unsigned long)SystemAddress);
+			(unsigned long)sys_addr);
 		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
 		return;
 	}
 
 	log_mci = src_mci;
 
-	csrow = sys_addr_to_csrow(log_mci, SystemAddress);
+	csrow = sys_addr_to_csrow(log_mci, sys_addr);
 	if (csrow < 0) {
 		amd64_mc_printk(mci, KERN_CRIT,
 			"ERROR_ADDRESS (0x%lx) value NOT mapped to 'csrow'\n",
-			(unsigned long)SystemAddress);
+			(unsigned long)sys_addr);
 		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
 	} else {
-		error_address_to_page_and_offset(SystemAddress, &page, &offset);
+		error_address_to_page_and_offset(sys_addr, &page, &offset);
 		edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
 	}
 }
@@ -2384,30 +2278,26 @@
 static void amd64_read_mc_registers(struct amd64_pvt *pvt)
 {
 	u64 msr_val;
-	int dram, err = 0;
+	int dram;
 
 	/*
 	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
 	 * those are Read-As-Zero
 	 */
-	rdmsrl(MSR_K8_TOP_MEM1, msr_val);
-	pvt->top_mem = msr_val >> 23;
-	debugf0("  TOP_MEM=0x%08llx\n", pvt->top_mem);
+	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
+	debugf0("  TOP_MEM:  0x%016llx\n", pvt->top_mem);
 
 	/* check first whether TOP_MEM2 is enabled */
 	rdmsrl(MSR_K8_SYSCFG, msr_val);
 	if (msr_val & (1U << 21)) {
-		rdmsrl(MSR_K8_TOP_MEM2, msr_val);
-		pvt->top_mem2 = msr_val >> 23;
-		debugf0("  TOP_MEM2=0x%08llx\n", pvt->top_mem2);
+		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
+		debugf0("  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
 	} else
 		debugf0("  TOP_MEM2 disabled.\n");
 
 	amd64_cpu_display_info(pvt);
 
-	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap);
-	if (err)
-		goto err_reg;
+	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap);
 
 	if (pvt->ops->read_dram_ctl_register)
 		pvt->ops->read_dram_ctl_register(pvt);
@@ -2425,13 +2315,12 @@
 		 * debug output block away.
 		 */
 		if (pvt->dram_rw_en[dram] != 0) {
-			debugf1("  DRAM_BASE[%d]: 0x%8.08x-%8.08x "
-				"DRAM_LIMIT:  0x%8.08x-%8.08x\n",
+			debugf1("  DRAM-BASE[%d]: 0x%016llx "
+				"DRAM-LIMIT:  0x%016llx\n",
 				dram,
-				(u32)(pvt->dram_base[dram] >> 32),
-				(u32)(pvt->dram_base[dram] & 0xFFFFFFFF),
-				(u32)(pvt->dram_limit[dram] >> 32),
-				(u32)(pvt->dram_limit[dram] & 0xFFFFFFFF));
+				pvt->dram_base[dram],
+				pvt->dram_limit[dram]);
+
 			debugf1("        IntlvEn=%s %s %s "
 				"IntlvSel=%d DstNode=%d\n",
 				pvt->dram_IntlvEn[dram] ?
@@ -2445,44 +2334,20 @@
 
 	amd64_read_dct_base_mask(pvt);
 
-	err = pci_read_config_dword(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar);
-	if (err)
-		goto err_reg;
-
+	amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar);
 	amd64_read_dbam_reg(pvt);
 
-	err = pci_read_config_dword(pvt->misc_f3_ctl,
-				F10_ONLINE_SPARE, &pvt->online_spare);
-	if (err)
-		goto err_reg;
+	amd64_read_pci_cfg(pvt->misc_f3_ctl,
+			   F10_ONLINE_SPARE, &pvt->online_spare);
 
-	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
-	if (err)
-		goto err_reg;
-
-	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0);
-	if (err)
-		goto err_reg;
+	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
+	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0);
 
 	if (!dct_ganging_enabled(pvt)) {
-		err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_1,
-						&pvt->dclr1);
-		if (err)
-			goto err_reg;
-
-		err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCHR_1,
-						&pvt->dchr1);
-		if (err)
-			goto err_reg;
+		amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
+		amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_1, &pvt->dchr1);
 	}
-
 	amd64_dump_misc_regs(pvt);
-
-	return;
-
-err_reg:
-	debugf0("Reading an MC register failed\n");
-
 }
 
 /*
@@ -2521,7 +2386,7 @@
  */
 static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
 {
-	u32 dram_map, nr_pages;
+	u32 cs_mode, nr_pages;
 
 	/*
 	 * The math on this doesn't look right on the surface because x/2*4 can
@@ -2530,9 +2395,9 @@
 	 * number of bits to shift the DBAM register to extract the proper CSROW
 	 * field.
 	 */
-	dram_map = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
+	cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
 
-	nr_pages = pvt->ops->dbam_map_to_pages(pvt, dram_map);
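+	/* dbam_to_cs() returns the chip select size in MB; shift converts to pages */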
+	nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT);
 
 	/*
 	 * If dual channel then double the memory size of single channel.
@@ -2540,7 +2405,7 @@
 	 */
 	nr_pages <<= (pvt->channel_count - 1);
 
-	debugf0("  (csrow=%d) DBAM map index= %d\n", csrow_nr, dram_map);
+	debugf0("  (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
 	debugf0("    nr_pages= %u  channel-count = %d\n",
 		nr_pages, pvt->channel_count);
 
@@ -2556,13 +2421,11 @@
 	struct csrow_info *csrow;
 	struct amd64_pvt *pvt;
 	u64 input_addr_min, input_addr_max, sys_addr;
-	int i, err = 0, empty = 1;
+	int i, empty = 1;
 
 	pvt = mci->pvt_info;
 
-	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg);
-	if (err)
-		debugf0("Reading K8_NBCFG failed\n");
+	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg);
 
 	debugf0("NBCFG= 0x%x  CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg,
 		(pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
@@ -2618,6 +2481,109 @@
 	return empty;
 }
 
+/* get all cores on this DCT */
+static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		if (amd_get_nb_id(cpu) == nid)
+			cpumask_set_cpu(cpu, mask);
+}
+
+/* check MCG_CTL on all the cpus on this node */
+static bool amd64_nb_mce_bank_enabled_on_node(int nid)
+{
+	cpumask_var_t mask;
+	struct msr *msrs;
+	int cpu, nbe, idx = 0;
+	bool ret = false;
+
+	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
+		amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
+			     __func__);
+		return false;
+	}
+
+	get_cpus_on_this_dct_cpumask(mask, nid);
+
+	msrs = kzalloc(sizeof(struct msr) * cpumask_weight(mask), GFP_KERNEL);
+	if (!msrs) {
+		amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
+			      __func__);
+		free_cpumask_var(mask);
+		return false;
+	}
+
+	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
+
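+	/*
+	 * Note: indexing msrs[] with a running counter assumes the results of
+	 * rdmsr_on_cpus() come back in cpumask iteration order.
+	 */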
+	for_each_cpu(cpu, mask) {
+		nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
+
+		debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
+			cpu, msrs[idx].q,
+			(nbe ? "enabled" : "disabled"));
+
+		if (!nbe)
+			goto out;
+
+		idx++;
+	}
+	ret = true;
+
+out:
+	kfree(msrs);
+	free_cpumask_var(mask);
+	return ret;
+}
+
+static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
+{
+	cpumask_var_t cmask;
+	struct msr *msrs = NULL;
+	int cpu, idx = 0;
+
+	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
+		amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
+			     __func__);
+		return -ENOMEM;
+	}
+
+	get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);
+
+	msrs = kzalloc(sizeof(struct msr) * cpumask_weight(cmask), GFP_KERNEL);
+	if (!msrs) {
+		amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
+			     __func__);
+		return -ENOMEM;
+	}
+
+	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
+
+	for_each_cpu(cpu, cmask) {
+
+		if (on) {
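+			/* remember whether the NB MCE bank was already enabled */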
+			if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
+				pvt->flags.ecc_report = 1;
+
+			msrs[idx].l |= K8_MSR_MCGCTL_NBE;
+		} else {
+			/*
+			 * Turn off ECC reporting only when it was off before
+			 */
+			if (!pvt->flags.ecc_report)
+				msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
+		}
+		idx++;
+	}
+	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
+
+	kfree(msrs);
+	free_cpumask_var(cmask);
+
+	return 0;
+}
+
 /*
  * Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we"
  * enable it.
@@ -2625,24 +2591,16 @@
 static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
 {
 	struct amd64_pvt *pvt = mci->pvt_info;
-	const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
-	int cpu, idx = 0, err = 0;
-	struct msr msrs[cpumask_weight(cpumask)];
-	u32 value;
-	u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
 
 	if (!ecc_enable_override)
 		return;
 
-	memset(msrs, 0, sizeof(msrs));
-
 	amd64_printk(KERN_WARNING,
 		"'ecc_enable_override' parameter is active, "
 		"Enabling AMD ECC hardware now: CAUTION\n");
 
-	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
-	if (err)
-		debugf0("Reading K8_NBCTL failed\n");
+	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);
 
 	/* turn on UECCn and CECCEn bits */
 	pvt->old_nbctl = value & mask;
@@ -2651,20 +2609,11 @@
 	value |= mask;
 	pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
 
-	rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
+	if (amd64_toggle_ecc_err_reporting(pvt, ON))
+		amd64_printk(KERN_WARNING, "Error enabling ECC reporting over "
+					   "MCGCTL!\n");
 
-	for_each_cpu(cpu, cpumask) {
-		if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
-			set_bit(idx, &pvt->old_mcgctl);
-
-		msrs[idx].l |= K8_MSR_MCGCTL_NBE;
-		idx++;
-	}
-	wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
-
-	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
-	if (err)
-		debugf0("Reading K8_NBCFG failed\n");
+	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
 
 	debugf0("NBCFG(1)= 0x%x  CHIPKILL= %s ECC_ENABLE= %s\n", value,
 		(value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
@@ -2679,9 +2628,7 @@
 		value |= K8_NBCFG_ECC_ENABLE;
 		pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
 
-		err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
-		if (err)
-			debugf0("Reading K8_NBCFG failed\n");
+		amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
 
 		if (!(value & K8_NBCFG_ECC_ENABLE)) {
 			amd64_printk(KERN_WARNING,
@@ -2701,86 +2648,21 @@
 
 static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
 {
-	const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
-	int cpu, idx = 0, err = 0;
-	struct msr msrs[cpumask_weight(cpumask)];
-	u32 value;
-	u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
 
 	if (!pvt->nbctl_mcgctl_saved)
 		return;
 
-	memset(msrs, 0, sizeof(msrs));
-
-	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
-	if (err)
-		debugf0("Reading K8_NBCTL failed\n");
+	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);
 	value &= ~mask;
 	value |= pvt->old_nbctl;
 
 	/* restore the NB Enable MCGCTL bit */
 	pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
 
-	rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
-
-	for_each_cpu(cpu, cpumask) {
-		msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
-		msrs[idx].l |=
-			test_bit(idx, &pvt->old_mcgctl) << K8_MSR_MCGCTL_NBE;
-		idx++;
-	}
-
-	wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
-}
-
-/* get all cores on this DCT */
-static void get_cpus_on_this_dct_cpumask(cpumask_t *mask, int nid)
-{
-	int cpu;
-
-	for_each_online_cpu(cpu)
-		if (amd_get_nb_id(cpu) == nid)
-			cpumask_set_cpu(cpu, mask);
-}
-
-/* check MCG_CTL on all the cpus on this node */
-static bool amd64_nb_mce_bank_enabled_on_node(int nid)
-{
-	cpumask_t mask;
-	struct msr *msrs;
-	int cpu, nbe, idx = 0;
-	bool ret = false;
-
-	cpumask_clear(&mask);
-
-	get_cpus_on_this_dct_cpumask(&mask, nid);
-
-	msrs = kzalloc(sizeof(struct msr) * cpumask_weight(&mask), GFP_KERNEL);
-	if (!msrs) {
-		amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
-			      __func__);
-		 return false;
-	}
-
-	rdmsr_on_cpus(&mask, MSR_IA32_MCG_CTL, msrs);
-
-	for_each_cpu(cpu, &mask) {
-		nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
-
-		debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
-			cpu, msrs[idx].q,
-			(nbe ? "enabled" : "disabled"));
-
-		if (!nbe)
-			goto out;
-
-		idx++;
-	}
-	ret = true;
-
-out:
-	kfree(msrs);
-	return ret;
+	if (amd64_toggle_ecc_err_reporting(pvt, OFF))
+		amd64_printk(KERN_WARNING, "Error restoring ECC reporting over "
+					   "MCGCTL!\n");
 }
 
 /*
@@ -2797,13 +2679,10 @@
 static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
 {
 	u32 value;
-	int err = 0;
 	u8 ecc_enabled = 0;
 	bool nb_mce_en = false;
 
-	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
-	if (err)
-		debugf0("Reading K8_NBCTL failed\n");
+	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
 
 	ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
 	if (!ecc_enabled)
@@ -2909,7 +2788,6 @@
 	pvt->ext_model		= boot_cpu_data.x86_model >> 4;
 	pvt->mc_type_index	= mc_type_index;
 	pvt->ops		= family_ops(mc_type_index);
-	pvt->old_mcgctl		= 0;
 
 	/*
 	 * We have the dram_f2_ctl device as an argument, now go reserve its
@@ -2959,17 +2837,10 @@
 {
 	int node_id = pvt->mc_node_id;
 	struct mem_ctl_info *mci;
-	int ret, err = 0;
+	int ret = -ENODEV;
 
 	amd64_read_mc_registers(pvt);
 
-	ret = -ENODEV;
-	if (pvt->ops->probe_valid_hardware) {
-		err = pvt->ops->probe_valid_hardware(pvt);
-		if (err)
-			goto err_exit;
-	}
-
 	/*
 	 * We need to determine how many memory channels there are. Then use
 	 * that information for calculating the size of the dynamic instance
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index c6f359a..41bc561 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -129,24 +129,22 @@
  *         sections 3.5.4 and 3.5.5 for more information.
  */
 
-#define EDAC_AMD64_VERSION		" Ver: 3.2.0 " __DATE__
+#define EDAC_AMD64_VERSION		" Ver: 3.3.0 " __DATE__
 #define EDAC_MOD_STR			"amd64_edac"
 
 #define EDAC_MAX_NUMNODES		8
 
 /* Extended Model from CPUID, for CPU Revision numbers */
-#define OPTERON_CPU_LE_REV_C		0
-#define OPTERON_CPU_REV_D		1
-#define OPTERON_CPU_REV_E		2
-
-/* NPT processors have the following Extended Models */
-#define OPTERON_CPU_REV_F		4
-#define OPTERON_CPU_REV_FA		5
+#define K8_REV_D			1
+#define K8_REV_E			2
+#define K8_REV_F			4
 
 /* Hardware limit on ChipSelect rows per MC and processors per system */
 #define MAX_CS_COUNT			8
 #define DRAM_REG_COUNT			8
 
+#define ON true
+#define OFF false
 
 /*
  * PCI-defined configuration space registers
@@ -241,7 +239,7 @@
 #define F10_DCHR_1			0x194
 
 #define F10_DCHR_FOUR_RANK_DIMM		BIT(18)
-#define F10_DCHR_Ddr3Mode		BIT(8)
+#define DDR3_MODE			BIT(8)
 #define F10_DCHR_MblMode		BIT(6)
 
 
@@ -382,14 +380,9 @@
 #define K8_NBCAP_CORES			(BIT(12)|BIT(13))
 #define K8_NBCAP_CHIPKILL		BIT(4)
 #define K8_NBCAP_SECDED			BIT(3)
-#define K8_NBCAP_8_NODE			BIT(2)
-#define K8_NBCAP_DUAL_NODE		BIT(1)
 #define K8_NBCAP_DCT_DUAL		BIT(0)
 
-/*
- * MSR Regs
- */
-#define K8_MSR_MCGCTL			0x017b
+/* MSRs */
 #define K8_MSR_MCGCTL_NBE		BIT(4)
 
 #define K8_MSR_MC4CTL			0x0410
@@ -487,7 +480,6 @@
 	/* Save old hw registers' values before we modified them */
 	u32 nbctl_mcgctl_saved;		/* When true, following 2 are valid */
 	u32 old_nbctl;
-	unsigned long old_mcgctl;	/* per core on this node */
 
 	/* MC Type Index value: socket F vs Family 10h */
 	u32 mc_type_index;
@@ -495,6 +487,7 @@
 	/* misc settings */
 	struct flags {
 		unsigned long cf8_extcfg:1;
+		unsigned long ecc_report:1;
 	} flags;
 };
 
@@ -504,7 +497,6 @@
 };
 
 extern struct scrubrate scrubrates[23];
-extern u32 revf_quad_ddr2_shift[16];
 extern const char *tt_msgs[4];
 extern const char *ll_msgs[4];
 extern const char *rrrr_msgs[16];
@@ -534,17 +526,15 @@
  * functions and per device encoding/decoding logic.
  */
 struct low_ops {
-	int (*probe_valid_hardware)(struct amd64_pvt *pvt);
-	int (*early_channel_count)(struct amd64_pvt *pvt);
+	int (*early_channel_count)	(struct amd64_pvt *pvt);
 
-	u64 (*get_error_address)(struct mem_ctl_info *mci,
-			struct err_regs *info);
-	void (*read_dram_base_limit)(struct amd64_pvt *pvt, int dram);
-	void (*read_dram_ctl_register)(struct amd64_pvt *pvt);
-	void (*map_sysaddr_to_csrow)(struct mem_ctl_info *mci,
-					struct err_regs *info,
-					u64 SystemAddr);
-	int (*dbam_map_to_pages)(struct amd64_pvt *pvt, int dram_map);
+	u64 (*get_error_address)	(struct mem_ctl_info *mci,
+					 struct err_regs *info);
+	void (*read_dram_base_limit)	(struct amd64_pvt *pvt, int dram);
+	void (*read_dram_ctl_register)	(struct amd64_pvt *pvt);
+	void (*map_sysaddr_to_csrow)	(struct mem_ctl_info *mci,
+					 struct err_regs *info, u64 SystemAddr);
+	int (*dbam_to_cs)		(struct amd64_pvt *pvt, int cs_mode);
 };
 
 struct amd64_family_type {
@@ -566,6 +556,22 @@
 	return &amd64_family_types[index].ops;
 }
 
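+/*
+ * Wrapper around pci_read_config_dword() which warns, with the calling
+ * function's name, whenever a config space read fails.  Callers use the
+ * amd64_read_pci_cfg() macro below so that __func__ expands correctly.
+ */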
+static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
+					   u32 *val, const char *func)
+{
+	int err = 0;
+
+	err = pci_read_config_dword(pdev, offset, val);
+	if (err)
+		amd64_printk(KERN_WARNING, "%s: error reading F%dx%x.\n",
+			     func, PCI_FUNC(pdev->devfn), offset);
+
+	return err;
+}
+
+#define amd64_read_pci_cfg(pdev, offset, val)	\
+	amd64_read_pci_cfg_dword(pdev, offset, val, __func__)
+
 /*
  * For future CPU versions, verify the following as new 'slow' rates appear and
  * modify the necessary skip values for the supported CPU.
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 12f355c..001b2e7 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -74,6 +74,7 @@
 
 #ifdef CONFIG_EDAC_DEBUG
 extern int edac_debug_level;
+extern const char *edac_mem_types[];
 
 #ifndef CONFIG_EDAC_DEBUG_VERBOSE
 #define edac_debug_printk(level, fmt, arg...)                           \
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index b629c41..3630308 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -76,6 +76,30 @@
 	debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
 }
 
+/*
+ * keep those in sync with the enum mem_type
+ */
+const char *edac_mem_types[] = {
+	"Empty csrow",
+	"Reserved csrow type",
+	"Unknown csrow type",
+	"Fast page mode RAM",
+	"Extended data out RAM",
+	"Burst Extended data out RAM",
+	"Single data rate SDRAM",
+	"Registered single data rate SDRAM",
+	"Double data rate SDRAM",
+	"Registered Double data rate SDRAM",
+	"Rambus DRAM",
+	"Unbuffered DDR2 RAM",
+	"Fully buffered DDR2",
+	"Registered DDR2 RAM",
+	"Rambus XDR",
+	"Unbuffered DDR3 RAM",
+	"Registered DDR3 RAM",
+};
+EXPORT_SYMBOL_GPL(edac_mem_types);
+
 #endif				/* CONFIG_EDAC_DEBUG */
 
 /* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
index 689cc6a..c693fcc 100644
--- a/drivers/edac/edac_mce_amd.c
+++ b/drivers/edac/edac_mce_amd.c
@@ -306,7 +306,7 @@
 	 * value encoding has changed so interpret those differently
 	 */
 	if ((boot_cpu_data.x86 == 0x10) &&
-	    (boot_cpu_data.x86_model > 8)) {
+	    (boot_cpu_data.x86_model > 7)) {
 		if (regs->nbsh & K8_NBSH_ERR_CPU_VAL)
 			pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf));
 	} else {
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index ae4556f..96768e1 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2218,6 +2218,13 @@
 	page     = payload >> PAGE_SHIFT;
 	offset   = payload & ~PAGE_MASK;
 	rest     = p->payload_length;
+	/*
+	 * The controllers I've tested have not worked correctly when
+	 * second_req_count is zero.  Rather than do something we know won't
+	 * work, return an error.
+	 */
+	if (rest == 0)
+		return -EINVAL;
 
 	/* FIXME: make packet-per-buffer/dual-buffer a context option */
 	while (rest > 0) {
@@ -2271,7 +2278,7 @@
 					unsigned long payload)
 {
 	struct iso_context *ctx = container_of(base, struct iso_context, base);
-	struct descriptor *d = NULL, *pd = NULL;
+	struct descriptor *d, *pd;
 	struct fw_iso_packet *p = packet;
 	dma_addr_t d_bus, page_bus;
 	u32 z, header_z, rest;
@@ -2309,8 +2316,9 @@
 		d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));
 
 		rest = payload_per_buffer;
+		pd = d;
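+		/* pd walks the z - 1 payload descriptors that follow the first one */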
 		for (j = 1; j < z; j++) {
-			pd = d + j;
+			pd++;
 			pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
 						  DESCRIPTOR_INPUT_MORE);
 
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 3c8827a..470ef67 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -15,7 +15,7 @@
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 
-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o
+drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
 
 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
 
@@ -31,3 +31,5 @@
 obj-$(CONFIG_DRM_SIS)   += sis/
 obj-$(CONFIG_DRM_SAVAGE)+= savage/
 obj-$(CONFIG_DRM_VIA)	+=via/
+obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
+obj-y			+= i2c/
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 3f7c500..5124401 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -125,6 +125,15 @@
 DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
 		 drm_tv_subconnector_enum_list)
 
+static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
+	{ DRM_MODE_DIRTY_OFF,      "Off"      },
+	{ DRM_MODE_DIRTY_ON,       "On"       },
+	{ DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
+};
+
+DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
+		 drm_dirty_info_enum_list)
+
 struct drm_conn_prop_enum_list {
 	int type;
 	char *name;
@@ -247,7 +256,8 @@
 	mutex_unlock(&dev->mode_config.idr_mutex);
 }
 
-void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type)
+struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+		uint32_t id, uint32_t type)
 {
 	struct drm_mode_object *obj = NULL;
 
@@ -802,6 +812,36 @@
 EXPORT_SYMBOL(drm_mode_create_dithering_property);
 
 /**
+ * drm_mode_create_dirty_info_property - create dirty info property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it is needed; the property must then be
+ * attached to the desired connectors.
+ */
+int drm_mode_create_dirty_info_property(struct drm_device *dev)
+{
+	struct drm_property *dirty_info;
+	int i;
+
+	if (dev->mode_config.dirty_info_property)
+		return 0;
+
+	dirty_info =
+		drm_property_create(dev, DRM_MODE_PROP_ENUM |
+				    DRM_MODE_PROP_IMMUTABLE,
+				    "dirty",
+				    ARRAY_SIZE(drm_dirty_info_enum_list));
+	for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++)
+		drm_property_add_enum(dirty_info, i,
+				      drm_dirty_info_enum_list[i].type,
+				      drm_dirty_info_enum_list[i].name);
+	dev->mode_config.dirty_info_property = dirty_info;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
+
+/**
  * drm_mode_config_init - initialize DRM mode_configuration structure
  * @dev: DRM device
  *
@@ -1753,6 +1793,71 @@
 	return ret;
 }
 
+int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+			   void *data, struct drm_file *file_priv)
+{
+	struct drm_clip_rect __user *clips_ptr;
+	struct drm_clip_rect *clips = NULL;
+	struct drm_mode_fb_dirty_cmd *r = data;
+	struct drm_mode_object *obj;
+	struct drm_framebuffer *fb;
+	unsigned flags;
+	int num_clips;
+	int ret = 0;
+
+	mutex_lock(&dev->mode_config.mutex);
+	obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
+	if (!obj) {
+		DRM_ERROR("invalid framebuffer id\n");
+		ret = -EINVAL;
+		goto out_err1;
+	}
+	fb = obj_to_fb(obj);
+
+	num_clips = r->num_clips;
+	clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
+
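+	/* num_clips and clips_ptr must be supplied together or not at all */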
+	if (!num_clips != !clips_ptr) {
+		ret = -EINVAL;
+		goto out_err1;
+	}
+
+	flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
+
+	/* If userspace annotates copy, clips must come in pairs */
+	if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
+		ret = -EINVAL;
+		goto out_err1;
+	}
+
+	if (num_clips && clips_ptr) {
+		clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+		if (!clips) {
+			ret = -ENOMEM;
+			goto out_err1;
+		}
+
+		if (copy_from_user(clips, clips_ptr,
+				   num_clips * sizeof(*clips))) {
+			ret = -EFAULT;
+			goto out_err2;
+		}
+	}
+
+	if (fb->funcs->dirty) {
+		ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips);
+	} else {
+		ret = -ENOSYS;
+		goto out_err2;
+	}
+
+out_err2:
+	kfree(clips);
+out_err1:
+	mutex_unlock(&dev->mode_config.mutex);
+	return ret;
+}
+
 /**
  * drm_fb_release - remove and free the FBs on this file
  * @filp: file * from the ioctl
@@ -2478,3 +2583,72 @@
 	mutex_unlock(&dev->mode_config.mutex);
 	return ret;
 }
+
+int drm_mode_page_flip_ioctl(struct drm_device *dev,
+			     void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_crtc_page_flip *page_flip = data;
+	struct drm_mode_object *obj;
+	struct drm_crtc *crtc;
+	struct drm_framebuffer *fb;
+	struct drm_pending_vblank_event *e = NULL;
+	unsigned long flags;
+	int ret = -EINVAL;
+
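+	/* reject unknown flags and non-zero reserved fields up front */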
+	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
+	    page_flip->reserved != 0)
+		return -EINVAL;
+
+	mutex_lock(&dev->mode_config.mutex);
+	obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
+	if (!obj)
+		goto out;
+	crtc = obj_to_crtc(obj);
+
+	if (crtc->funcs->page_flip == NULL)
+		goto out;
+
+	obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB);
+	if (!obj)
+		goto out;
+	fb = obj_to_fb(obj);
+
+	if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+		ret = -ENOMEM;
+		spin_lock_irqsave(&dev->event_lock, flags);
+		if (file_priv->event_space < sizeof e->event) {
+			spin_unlock_irqrestore(&dev->event_lock, flags);
+			goto out;
+		}
+		file_priv->event_space -= sizeof e->event;
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+
+		e = kzalloc(sizeof *e, GFP_KERNEL);
+		if (e == NULL) {
+			spin_lock_irqsave(&dev->event_lock, flags);
+			file_priv->event_space += sizeof e->event;
+			spin_unlock_irqrestore(&dev->event_lock, flags);
+			goto out;
+		}
+
+		e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
+		e->event.base.length = sizeof e->event;
+		e->event.user_data = page_flip->user_data;
+		e->base.event = &e->event.base;
+		e->base.file_priv = file_priv;
+		e->base.destroy =
+			(void (*) (struct drm_pending_event *)) kfree;
+	}
+
+	ret = crtc->funcs->page_flip(crtc, fb, e);
+	if (ret && (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT)) {
+		/* give back the event slot we reserved above */
+		spin_lock_irqsave(&dev->event_lock, flags);
+		file_priv->event_space += sizeof e->event;
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		kfree(e);
+	}
+
+out:
+	mutex_unlock(&dev->mode_config.mutex);
+	return ret;
+}
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index bbfd110..4231d6d 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -109,7 +109,7 @@
 
 	count = (*connector_funcs->get_modes)(connector);
 	if (!count) {
-		count = drm_add_modes_noedid(connector, 800, 600);
+		count = drm_add_modes_noedid(connector, 1024, 768);
 		if (!count)
 			return 0;
 	}
@@ -1020,6 +1020,9 @@
 {
 	int count = 0;
 
+	/* disable all the possible outputs/crtcs before entering KMS mode */
+	drm_helper_disable_unused_functions(dev);
+
 	drm_fb_helper_parse_command_line(dev);
 
 	count = drm_helper_probe_connector_modes(dev,
diff --git a/drivers/gpu/drm/i915/intel_dp_i2c.c b/drivers/gpu/drm/drm_dp_i2c_helper.c
similarity index 78%
rename from drivers/gpu/drm/i915/intel_dp_i2c.c
rename to drivers/gpu/drm/drm_dp_i2c_helper.c
index a63b6f5..548887c 100644
--- a/drivers/gpu/drm/i915/intel_dp_i2c.c
+++ b/drivers/gpu/drm/drm_dp_i2c_helper.c
@@ -28,84 +28,20 @@
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/i2c.h>
-#include "intel_dp.h"
+#include "drm_dp_helper.h"
 #include "drmP.h"
 
 /* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
-
-#define MODE_I2C_START	1
-#define MODE_I2C_WRITE	2
-#define MODE_I2C_READ	4
-#define MODE_I2C_STOP	8
-
 static int
 i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
 			    uint8_t write_byte, uint8_t *read_byte)
 {
 	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
-	uint16_t address = algo_data->address;
-	uint8_t msg[5];
-	uint8_t reply[2];
-	int msg_bytes;
-	int reply_bytes;
 	int ret;
-
-	/* Set up the command byte */
-	if (mode & MODE_I2C_READ)
-		msg[0] = AUX_I2C_READ << 4;
-	else
-		msg[0] = AUX_I2C_WRITE << 4;
-
-	if (!(mode & MODE_I2C_STOP))
-		msg[0] |= AUX_I2C_MOT << 4;
-
-	msg[1] = address >> 8;
-	msg[2] = address;
-
-	switch (mode) {
-	case MODE_I2C_WRITE:
-		msg[3] = 0;
-		msg[4] = write_byte;
-		msg_bytes = 5;
-		reply_bytes = 1;
-		break;
-	case MODE_I2C_READ:
-		msg[3] = 0;
-		msg_bytes = 4;
-		reply_bytes = 2;
-		break;
-	default:
-		msg_bytes = 3;
-		reply_bytes = 1;
-		break;
-	}
-
-	for (;;) {
-		ret = (*algo_data->aux_ch)(adapter,
-					   msg, msg_bytes,
-					   reply, reply_bytes);
-		if (ret < 0) {
-			DRM_DEBUG("aux_ch failed %d\n", ret);
-			return ret;
-		}
-		switch (reply[0] & AUX_I2C_REPLY_MASK) {
-		case AUX_I2C_REPLY_ACK:
-			if (mode == MODE_I2C_READ) {
-				*read_byte = reply[1];
-			}
-			return reply_bytes - 1;
-		case AUX_I2C_REPLY_NACK:
-			DRM_DEBUG("aux_ch nack\n");
-			return -EREMOTEIO;
-		case AUX_I2C_REPLY_DEFER:
-			DRM_DEBUG("aux_ch defer\n");
-			udelay(100);
-			break;
-		default:
-			DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
-			return -EREMOTEIO;
-		}
-	}
+
+	ret = (*algo_data->aux_ch)(adapter, mode,
+				   write_byte, read_byte);
+	return ret;
 }
 
 /*
@@ -224,7 +160,7 @@
 	if (ret >= 0)
 		ret = num;
 	i2c_algo_dp_aux_stop(adapter, reading);
-	DRM_DEBUG("dp_aux_xfer return %d\n", ret);
+	DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index a75ca63..ff2f104 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -145,6 +145,8 @@
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW)
 };
 
 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
@@ -366,6 +368,29 @@
 module_exit(drm_core_exit);
 
 /**
+ * Copy an IOCTL return string to user space
+ */
+static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
+{
+	int len;
+
+	/* don't overflow userbuf */
+	len = strlen(value);
+	if (len > *buf_len)
+		len = *buf_len;
+
+	/* let userspace know exact length of driver value (which could be
+	 * larger than the userspace-supplied buffer) */
+	*buf_len = strlen(value);
+
+	/* finally, try filling in the userbuf */
+	if (len && buf)
+		if (copy_to_user(buf, value, len))
+			return -EFAULT;
+	return 0;
+}
+
+/**
  * Get version information
  *
  * \param inode device inode.
@@ -380,16 +405,21 @@
 		       struct drm_file *file_priv)
 {
 	struct drm_version *version = data;
-	int len;
+	int err;
 
 	version->version_major = dev->driver->major;
 	version->version_minor = dev->driver->minor;
 	version->version_patchlevel = dev->driver->patchlevel;
-	DRM_COPY(version->name, dev->driver->name);
-	DRM_COPY(version->date, dev->driver->date);
-	DRM_COPY(version->desc, dev->driver->desc);
+	err = drm_copy_field(version->name, &version->name_len,
+			dev->driver->name);
+	if (!err)
+		err = drm_copy_field(version->date, &version->date_len,
+				dev->driver->date);
+	if (!err)
+		err = drm_copy_field(version->desc, &version->desc_len,
+				dev->driver->desc);
 
-	return 0;
+	return err;
 }
 
 /**
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index b54ba63..c39b26f 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -123,18 +123,20 @@
  */
 static bool edid_is_valid(struct edid *edid)
 {
-	int i;
+	int i, score = 0;
 	u8 csum = 0;
 	u8 *raw_edid = (u8 *)edid;
 
-	if (memcmp(edid->header, edid_header, sizeof(edid_header)))
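+	/* count how many of the eight header bytes match the EDID signature */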
+	for (i = 0; i < sizeof(edid_header); i++)
+		if (raw_edid[i] == edid_header[i])
+			score++;
+
+	if (score == 8) {
+		/* perfect header match, nothing to fix */
+	} else if (score >= 6) {
+		DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
+		memcpy(raw_edid, edid_header, sizeof(edid_header));
+	} else
 		goto bad;
-	if (edid->version != 1) {
-		DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
-		goto bad;
-	}
-	if (edid->revision > 4)
-		DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
 
 	for (i = 0; i < EDID_LENGTH; i++)
 		csum += raw_edid[i];
@@ -143,6 +145,14 @@
 		goto bad;
 	}
 
+	if (edid->version != 1) {
+		DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+		goto bad;
+	}
+
+	if (edid->revision > 4)
+		DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+
 	return 1;
 
 bad:
@@ -481,16 +491,17 @@
 		   3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
 };
+static const int drm_num_dmt_modes = ARRAY_SIZE(drm_dmt_modes);
 
 static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
 			int hsize, int vsize, int fresh)
 {
-	int i, count;
+	int i;
 	struct drm_display_mode *ptr, *mode;
 
-	count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
 	mode = NULL;
-	for (i = 0; i < count; i++) {
+	for (i = 0; i < drm_num_dmt_modes; i++) {
 		ptr = &drm_dmt_modes[i];
 		if (hsize == ptr->hdisplay &&
 			vsize == ptr->vdisplay &&
@@ -834,8 +845,165 @@
 	return modes;
 }
 
+/*
+ * XXX fix this for:
+ * - GTF secondary curve formula
+ * - EDID 1.4 range offsets
+ * - CVT extended bits
+ */
+static bool
+mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
+{
+	struct detailed_data_monitor_range *range;
+	int hsync, vrefresh;
+
+	range = &timing->data.other_data.data.range;
+
+	hsync = drm_mode_hsync(mode);
+	vrefresh = drm_mode_vrefresh(mode);
+
+	if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz)
+		return false;
+
+	if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq)
+		return false;
+
+	if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) {
+		/* be forgiving since it's in units of 10MHz */
+		int max_clock = range->pixel_clock_mhz * 10 + 9;
+		max_clock *= 1000;
+		if (mode->clock > max_clock)
+			return false;
+	}
+
+	return true;
+}
+
+/*
+ * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
+ * need to account for them.
+ */
+static int drm_gtf_modes_for_range(struct drm_connector *connector,
+				   struct detailed_timing *timing)
+{
+	int i, modes = 0;
+	struct drm_display_mode *newmode;
+	struct drm_device *dev = connector->dev;
+
+	for (i = 0; i < drm_num_dmt_modes; i++) {
+		if (mode_in_range(drm_dmt_modes + i, timing)) {
+			newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
+			if (newmode) {
+				drm_mode_probed_add(connector, newmode);
+				modes++;
+			}
+		}
+	}
+
+	return modes;
+}
+
+static int drm_cvt_modes(struct drm_connector *connector,
+			 struct detailed_timing *timing)
+{
+	int i, j, modes = 0;
+	struct drm_display_mode *newmode;
+	struct drm_device *dev = connector->dev;
+	struct cvt_timing *cvt;
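+	/* code[2] refresh bits: 4=50Hz, 3=60Hz, 2=75Hz, 1=85Hz; bit 0 (60Hz RB) is not scanned below */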
+	const int rates[] = { 60, 85, 75, 60, 50 };
+
+	for (i = 0; i < 4; i++) {
+		int width, height;
+		cvt = &(timing->data.other_data.data.cvt[i]);
+
+		height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
+		switch (cvt->code[1] & 0xc0) {
+		case 0x00:
+			width = height * 4 / 3;
+			break;
+		case 0x40:
+			width = height * 16 / 9;
+			break;
+		case 0x80:
+			width = height * 16 / 10;
+			break;
+		case 0xc0:
+			width = height * 15 / 9;
+			break;
+		}
+
+		for (j = 1; j < 5; j++) {
+			if (cvt->code[2] & (1 << j)) {
+				newmode = drm_cvt_mode(dev, width, height,
+						       rates[j], j == 0,
+						       false, false);
+				if (newmode) {
+					drm_mode_probed_add(connector, newmode);
+					modes++;
+				}
+			}
+		}
+	}
+
+	return modes;
+}
+
+static int add_detailed_modes(struct drm_connector *connector,
+			      struct detailed_timing *timing,
+			      struct edid *edid, u32 quirks, int preferred)
+{
+	int i, modes = 0;
+	struct detailed_non_pixel *data = &timing->data.other_data;
+	int timing_level = standard_timing_level(edid);
+	int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
+	struct drm_display_mode *newmode;
+	struct drm_device *dev = connector->dev;
+
+	if (timing->pixel_clock) {
+		newmode = drm_mode_detailed(dev, edid, timing, quirks);
+		if (!newmode)
+			return 0;
+
+		if (preferred)
+			newmode->type |= DRM_MODE_TYPE_PREFERRED;
+
+		drm_mode_probed_add(connector, newmode);
+		return 1;
+	}
+
+	/* other timing types */
+	switch (data->type) {
+	case EDID_DETAIL_MONITOR_RANGE:
+		if (gtf)
+			modes += drm_gtf_modes_for_range(connector, timing);
+		break;
+	case EDID_DETAIL_STD_MODES:
+		/* Six modes per detailed section */
+		for (i = 0; i < 6; i++) {
+			struct std_timing *std;
+			struct drm_display_mode *newmode;
+
+			std = &data->data.timings[i];
+			newmode = drm_mode_std(dev, std, edid->revision,
+					       timing_level);
+			if (newmode) {
+				drm_mode_probed_add(connector, newmode);
+				modes++;
+			}
+		}
+		break;
+	case EDID_DETAIL_CVT_3BYTE:
+		modes += drm_cvt_modes(connector, timing);
+		break;
+	default:
+		break;
+	}
+
+	return modes;
+}
+
 /**
- * add_detailed_modes - get detailed mode info from EDID data
+ * add_detailed_info - get detailed mode info from EDID data
  * @connector: attached connector
  * @edid: EDID block to scan
  * @quirks: quirks to apply
@@ -846,67 +1014,24 @@
 static int add_detailed_info(struct drm_connector *connector,
 			     struct edid *edid, u32 quirks)
 {
-	struct drm_device *dev = connector->dev;
-	int i, j, modes = 0;
-	int timing_level;
-
-	timing_level = standard_timing_level(edid);
+	int i, modes = 0;
 
 	for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
 		struct detailed_timing *timing = &edid->detailed_timings[i];
-		struct detailed_non_pixel *data = &timing->data.other_data;
-		struct drm_display_mode *newmode;
+		int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
 
-		/* X server check is version 1.1 or higher */
-		if (edid->version == 1 && edid->revision >= 1 &&
-		    !timing->pixel_clock) {
-			/* Other timing or info */
-			switch (data->type) {
-			case EDID_DETAIL_MONITOR_SERIAL:
-				break;
-			case EDID_DETAIL_MONITOR_STRING:
-				break;
-			case EDID_DETAIL_MONITOR_RANGE:
-				/* Get monitor range data */
-				break;
-			case EDID_DETAIL_MONITOR_NAME:
-				break;
-			case EDID_DETAIL_MONITOR_CPDATA:
-				break;
-			case EDID_DETAIL_STD_MODES:
-				for (j = 0; j < 6; i++) {
-					struct std_timing *std;
-					struct drm_display_mode *newmode;
+		/* EDID 1.0 defines only detailed timings here, so skip
+		 * any other descriptor type */
+		if (!timing->pixel_clock && edid->version == 1 &&
+			edid->revision == 0)
+			continue;
 
-					std = &data->data.timings[j];
-					newmode = drm_mode_std(dev, std,
-							       edid->revision,
-							       timing_level);
-					if (newmode) {
-						drm_mode_probed_add(connector, newmode);
-						modes++;
-					}
-				}
-				break;
-			default:
-				break;
-			}
-		} else {
-			newmode = drm_mode_detailed(dev, edid, timing, quirks);
-			if (!newmode)
-				continue;
-
-			/* First detailed mode is preferred */
-			if (i == 0 && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING))
-				newmode->type |= DRM_MODE_TYPE_PREFERRED;
-			drm_mode_probed_add(connector, newmode);
-
-			modes++;
-		}
+		modes += add_detailed_modes(connector, timing, edid, quirks,
+					    preferred);
 	}
 
 	return modes;
 }
+
 /**
  * add_detailed_info_eedid - get detailed mode info from additional timing
  * 			EDID block
@@ -920,12 +1045,9 @@
 static int add_detailed_info_eedid(struct drm_connector *connector,
 			     struct edid *edid, u32 quirks)
 {
-	struct drm_device *dev = connector->dev;
-	int i, j, modes = 0;
+	int i, modes = 0;
 	char *edid_ext = NULL;
 	struct detailed_timing *timing;
-	struct detailed_non_pixel *data;
-	struct drm_display_mode *newmode;
 	int edid_ext_num;
 	int start_offset, end_offset;
 	int timing_level;
@@ -976,51 +1098,7 @@
 	for (i = start_offset; i < end_offset;
 			i += sizeof(struct detailed_timing)) {
 		timing = (struct detailed_timing *)(edid_ext + i);
-		data = &timing->data.other_data;
-		/* Detailed mode timing */
-		if (timing->pixel_clock) {
-			newmode = drm_mode_detailed(dev, edid, timing, quirks);
-			if (!newmode)
-				continue;
-
-			drm_mode_probed_add(connector, newmode);
-
-			modes++;
-			continue;
-		}
-
-		/* Other timing or info */
-		switch (data->type) {
-		case EDID_DETAIL_MONITOR_SERIAL:
-			break;
-		case EDID_DETAIL_MONITOR_STRING:
-			break;
-		case EDID_DETAIL_MONITOR_RANGE:
-			/* Get monitor range data */
-			break;
-		case EDID_DETAIL_MONITOR_NAME:
-			break;
-		case EDID_DETAIL_MONITOR_CPDATA:
-			break;
-		case EDID_DETAIL_STD_MODES:
-			/* Five modes per detailed section */
-			for (j = 0; j < 5; i++) {
-				struct std_timing *std;
-				struct drm_display_mode *newmode;
-
-				std = &data->data.timings[j];
-				newmode = drm_mode_std(dev, std,
-						       edid->revision,
-						       timing_level);
-				if (newmode) {
-					drm_mode_probed_add(connector, newmode);
-					modes++;
-				}
-			}
-			break;
-		default:
-			break;
-		}
+		modes += add_detailed_modes(connector, timing, edid, quirks, 0);
 	}
 
 	return modes;
@@ -1066,19 +1144,19 @@
 			     struct i2c_adapter *adapter,
 			     char *buf, int len)
 {
-	int ret;
+	int i;
 
-	ret = drm_do_probe_ddc_edid(adapter, buf, len);
-	if (ret != 0) {
-		goto end;
+	for (i = 0; i < 4; i++) {
+		if (drm_do_probe_ddc_edid(adapter, buf, len))
+			return -1;
+		if (edid_is_valid((struct edid *)buf))
+			return 0;
 	}
-	if (!edid_is_valid((struct edid *)buf)) {
-		dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
-			 drm_get_connector_name(connector));
-		ret = -1;
-	}
-end:
-	return ret;
+
+	/* repeated checksum failures; warn, but carry on */
+	dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
+		 drm_get_connector_name(connector));
+	return -1;
 }
 
 /**
@@ -1296,6 +1374,8 @@
 					ptr->vdisplay > vdisplay)
 				continue;
 		}
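+		/* without EDID info, don't assume the display can handle
+		 * more than ~60Hz */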
+		if (drm_mode_vrefresh(ptr) > 61)
+			continue;
 		mode = drm_mode_duplicate(dev, ptr);
 		if (mode) {
 			drm_mode_probed_add(connector, mode);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 65ef011..1b49fa0 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -373,11 +373,9 @@
 					mutex_unlock(&dev->mode_config.mutex);
 				}
 			}
-			if (dpms_mode == DRM_MODE_DPMS_OFF) {
-				mutex_lock(&dev->mode_config.mutex);
-				crtc_funcs->dpms(crtc, dpms_mode);
-				mutex_unlock(&dev->mode_config.mutex);
-			}
+			mutex_lock(&dev->mode_config.mutex);
+			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+			mutex_unlock(&dev->mode_config.mutex);
 		}
 	}
 }
@@ -385,18 +383,23 @@
 int drm_fb_helper_blank(int blank, struct fb_info *info)
 {
 	switch (blank) {
+	/* Display: On; HSync: On, VSync: On */
 	case FB_BLANK_UNBLANK:
 		drm_fb_helper_on(info);
 		break;
+	/* Display: Off; HSync: On, VSync: On */
 	case FB_BLANK_NORMAL:
-		drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
+		drm_fb_helper_off(info, DRM_MODE_DPMS_ON);
 		break;
+	/* Display: Off; HSync: Off, VSync: On */
 	case FB_BLANK_HSYNC_SUSPEND:
 		drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
 		break;
+	/* Display: Off; HSync: On, VSync: Off */
 	case FB_BLANK_VSYNC_SUSPEND:
 		drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND);
 		break;
+	/* Display: Off; HSync: Off, VSync: Off */
 	case FB_BLANK_POWERDOWN:
 		drm_fb_helper_off(info, DRM_MODE_DPMS_OFF);
 		break;
@@ -905,8 +908,13 @@
 
 	if (new_fb) {
 		info->var.pixclock = 0;
-		if (register_framebuffer(info) < 0)
+		ret = fb_alloc_cmap(&info->cmap, modeset->crtc->gamma_size, 0);
+		if (ret)
+			return ret;
+		if (register_framebuffer(info) < 0) {
+			fb_dealloc_cmap(&info->cmap);
 			return -EINVAL;
+		}
 	} else {
 		drm_fb_helper_set_par(info);
 	}
@@ -936,6 +944,7 @@
 		unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
 	}
 	drm_fb_helper_crtc_free(helper);
+	fb_dealloc_cmap(&helper->fb->fbdev->cmap);
 }
 EXPORT_SYMBOL(drm_fb_helper_free);
 
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 251bc0e..08d14df 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -257,6 +257,9 @@
 
 	INIT_LIST_HEAD(&priv->lhead);
 	INIT_LIST_HEAD(&priv->fbs);
+	INIT_LIST_HEAD(&priv->event_list);
+	init_waitqueue_head(&priv->event_wait);
+	priv->event_space = 4096; /* set aside 4k for event buffer */
 
 	if (dev->driver->driver_features & DRIVER_GEM)
 		drm_gem_open(dev, priv);
@@ -297,6 +300,18 @@
 				goto out_free;
 			}
 		}
+		mutex_lock(&dev->struct_mutex);
+		if (dev->driver->master_set) {
+			ret = dev->driver->master_set(dev, priv, true);
+			if (ret) {
+				/* drop both references if this fails */
+				drm_master_put(&priv->minor->master);
+				drm_master_put(&priv->master);
+				mutex_unlock(&dev->struct_mutex);
+				goto out_free;
+			}
+		}
+		mutex_unlock(&dev->struct_mutex);
 	} else {
 		/* get a reference to the master */
 		priv->master = drm_master_get(priv->minor->master);
@@ -413,6 +428,30 @@
 	}
 }
 
+static void drm_events_release(struct drm_file *file_priv)
+{
+	struct drm_device *dev = file_priv->minor->dev;
+	struct drm_pending_event *e, *et;
+	struct drm_pending_vblank_event *v, *vt;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	/* Remove pending flips */
+	list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
+		if (v->base.file_priv == file_priv) {
+			list_del(&v->base.link);
+			drm_vblank_put(dev, v->pipe);
+			v->base.destroy(&v->base);
+		}
+
+	/* Remove unconsumed events */
+	list_for_each_entry_safe(e, et, &file_priv->event_list, link)
+		e->destroy(e);
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
 /**
  * Release file.
  *
@@ -451,6 +490,8 @@
 	if (file_priv->minor->master)
 		drm_master_release(dev, filp);
 
+	drm_events_release(file_priv);
+
 	if (dev->driver->driver_features & DRIVER_GEM)
 		drm_gem_release(dev, file_priv);
 
@@ -504,6 +545,8 @@
 
 		if (file_priv->minor->master == file_priv->master) {
 			/* drop the reference held by the minor */
+			if (dev->driver->master_drop)
+				dev->driver->master_drop(dev, file_priv, true);
 			drm_master_put(&file_priv->minor->master);
 		}
 	}
@@ -544,9 +587,74 @@
 }
 EXPORT_SYMBOL(drm_release);
 
-/** No-op. */
+static bool
+drm_dequeue_event(struct drm_file *file_priv,
+		  size_t total, size_t max, struct drm_pending_event **out)
+{
+	struct drm_device *dev = file_priv->minor->dev;
+	struct drm_pending_event *e;
+	unsigned long flags;
+	bool ret = false;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	*out = NULL;
+	if (list_empty(&file_priv->event_list))
+		goto out;
+	e = list_first_entry(&file_priv->event_list,
+			     struct drm_pending_event, link);
+	if (e->event->length + total > max)
+		goto out;
+
+	file_priv->event_space += e->event->length;
+	list_del(&e->link);
+	*out = e;
+	ret = true;
+
+out:
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+	return ret;
+}
+
+ssize_t drm_read(struct file *filp, char __user *buffer,
+		 size_t count, loff_t *offset)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct drm_pending_event *e;
+	size_t total;
+	ssize_t ret;
+
+	ret = wait_event_interruptible(file_priv->event_wait,
+				       !list_empty(&file_priv->event_list));
+	if (ret < 0)
+		return ret;
+
+	total = 0;
+	while (drm_dequeue_event(file_priv, total, count, &e)) {
+		if (copy_to_user(buffer + total,
+				 e->event, e->event->length)) {
+			total = -EFAULT;
+			break;
+		}
+
+		total += e->event->length;
+		e->destroy(e);
+	}
+
+	return total;
+}
+EXPORT_SYMBOL(drm_read);
+
 unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
 {
-	return 0;
+	struct drm_file *file_priv = filp->private_data;
+	unsigned int mask = 0;
+
+	poll_wait(filp, &file_priv->event_wait, wait);
+
+	if (!list_empty(&file_priv->event_list))
+		mask |= POLLIN | POLLRDNORM;
+
+	return mask;
 }
 EXPORT_SYMBOL(drm_poll);
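
With drm_read() and drm_poll() wired up, a DRM file descriptor behaves
like any other pollable event stream.  A minimal userspace sketch of a
consumer (hypothetical helper, assuming libdrm-style installed headers
and an already-open DRM fd; error handling omitted):

    #include <poll.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <drm/drm.h>

    static void wait_one_vblank_event(int fd)
    {
            union drm_wait_vblank vbl;
            struct pollfd pfd = { .fd = fd, .events = POLLIN };
            char buf[4096];
            ssize_t len, i;

            /* queue an event one vblank from now instead of blocking */
            memset(&vbl, 0, sizeof(vbl));
            vbl.request.type = _DRM_VBLANK_RELATIVE | _DRM_VBLANK_EVENT;
            vbl.request.sequence = 1;
            vbl.request.signal = 0x12345678;  /* comes back as user_data */
            ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);

            poll(&pfd, 1, -1);                /* wait for POLLIN */
            len = read(fd, buf, sizeof(buf)); /* drain queued events */
            for (i = 0; i < len; ) {
                    struct drm_event *e = (struct drm_event *)&buf[i];

                    if (e->type == DRM_EVENT_VBLANK) {
                            struct drm_event_vblank *vb = (void *)e;

                            printf("vblank %u at %u.%06u\n", vb->sequence,
                                   vb->tv_sec, vb->tv_usec);
                    }
                    i += e->length;
            }
    }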
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 0a6f0b3..7998ee6 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -429,15 +429,21 @@
 
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
 	/* Going from 0->1 means we have to enable interrupts again */
-	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
-	    !dev->vblank_enabled[crtc]) {
-		ret = dev->driver->enable_vblank(dev, crtc);
-		DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
-		if (ret)
+	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+		if (!dev->vblank_enabled[crtc]) {
+			ret = dev->driver->enable_vblank(dev, crtc);
+			DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+			if (ret)
+				atomic_dec(&dev->vblank_refcount[crtc]);
+			else {
+				dev->vblank_enabled[crtc] = 1;
+				drm_update_vblank_count(dev, crtc);
+			}
+		}
+	} else {
+		if (!dev->vblank_enabled[crtc]) {
 			atomic_dec(&dev->vblank_refcount[crtc]);
-		else {
-			dev->vblank_enabled[crtc] = 1;
-			drm_update_vblank_count(dev, crtc);
+			ret = -EINVAL;
 		}
 	}
 	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@@ -464,6 +470,18 @@
 }
 EXPORT_SYMBOL(drm_vblank_put);
 
+void drm_vblank_off(struct drm_device *dev, int crtc)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev->vbl_lock, irqflags);
+	DRM_WAKEUP(&dev->vbl_queue[crtc]);
+	dev->vblank_enabled[crtc] = 0;
+	dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+}
+EXPORT_SYMBOL(drm_vblank_off);
+
 /**
  * drm_vblank_pre_modeset - account for vblanks across mode sets
  * @dev: DRM device
@@ -550,6 +568,63 @@
 	return ret;
 }
 
+static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
+				  union drm_wait_vblank *vblwait,
+				  struct drm_file *file_priv)
+{
+	struct drm_pending_vblank_event *e;
+	struct timeval now;
+	unsigned long flags;
+	unsigned int seq;
+
+	e = kzalloc(sizeof *e, GFP_KERNEL);
+	if (e == NULL)
+		return -ENOMEM;
+
+	e->pipe = pipe;
+	e->event.base.type = DRM_EVENT_VBLANK;
+	e->event.base.length = sizeof e->event;
+	e->event.user_data = vblwait->request.signal;
+	e->base.event = &e->event.base;
+	e->base.file_priv = file_priv;
+	e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+	do_gettimeofday(&now);
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	if (file_priv->event_space < sizeof e->event) {
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		kfree(e);
+		return -ENOMEM;
+	}
+
+	file_priv->event_space -= sizeof e->event;
+	seq = drm_vblank_count(dev, pipe);
+	if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
+	    (seq - vblwait->request.sequence) <= (1 << 23)) {
+		vblwait->request.sequence = seq + 1;
+		vblwait->reply.sequence = vblwait->request.sequence;
+	}
+
+	DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
+		  vblwait->request.sequence, seq, pipe);
+
+	e->event.sequence = vblwait->request.sequence;
+	if ((seq - vblwait->request.sequence) <= (1 << 23)) {
+		e->event.tv_sec = now.tv_sec;
+		e->event.tv_usec = now.tv_usec;
+		drm_vblank_put(dev, e->pipe);
+		list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+		wake_up_interruptible(&e->base.file_priv->event_wait);
+	} else {
+		list_add_tail(&e->base.link, &dev->vblank_event_list);
+	}
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	return 0;
+}
+
 /**
  * Wait for VBLANK.
  *
@@ -609,6 +684,9 @@
 		goto done;
 	}
 
+	if (flags & _DRM_VBLANK_EVENT)
+		return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
+
 	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
 	    (seq - vblwait->request.sequence) <= (1<<23)) {
 		vblwait->request.sequence = seq + 1;
@@ -641,6 +719,38 @@
 	return ret;
 }
 
+void drm_handle_vblank_events(struct drm_device *dev, int crtc)
+{
+	struct drm_pending_vblank_event *e, *t;
+	struct timeval now;
+	unsigned long flags;
+	unsigned int seq;
+
+	do_gettimeofday(&now);
+	seq = drm_vblank_count(dev, crtc);
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
+		if (e->pipe != crtc)
+			continue;
+		if ((seq - e->event.sequence) > (1<<23))
+			continue;
+
+		DRM_DEBUG("vblank event on %d, current %d\n",
+			  e->event.sequence, seq);
+
+		e->event.sequence = seq;
+		e->event.tv_sec = now.tv_sec;
+		e->event.tv_usec = now.tv_usec;
+		drm_vblank_put(dev, e->pipe);
+		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+		wake_up_interruptible(&e->base.file_priv->event_wait);
+	}
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
 /**
  * drm_handle_vblank - handle a vblank event
  * @dev: DRM device
@@ -651,7 +761,11 @@
  */
 void drm_handle_vblank(struct drm_device *dev, int crtc)
 {
+	if (!dev->num_crtcs)
+		return;
+
 	atomic_inc(&dev->_vblank_count[crtc]);
 	DRM_WAKEUP(&dev->vbl_queue[crtc]);
+	drm_handle_vblank_events(dev, crtc);
 }
 EXPORT_SYMBOL(drm_handle_vblank);
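
A note on the "(seq - x) <= (1 << 23)" comparisons used throughout this
file: they test "seq is at or after x" in a way that survives wraparound
of the unsigned sequence counter.  A standalone sketch of the idiom (not
kernel code):

    #include <assert.h>

    /* "a is at or after b", tolerant of counter wraparound: the unsigned
     * difference only stays within half the 24-bit range when a has not
     * fallen behind b. */
    static int vblank_passed(unsigned int a, unsigned int b)
    {
            return (a - b) <= (1 << 23);
    }

    int main(void)
    {
            assert(vblank_passed(5, 3));           /* 5 is after 3 */
            assert(!vblank_passed(3, 5));          /* 3 is before 5 */
            assert(vblank_passed(2, 0xfffffffeu)); /* wrapped past zero */
            return 0;
    }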
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 97dc5a4..d7d7eac 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -226,6 +226,44 @@
 }
 EXPORT_SYMBOL(drm_mm_get_block_generic);
 
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long start,
+						unsigned long end,
+						int atomic)
+{
+	struct drm_mm_node *align_splitoff = NULL;
+	unsigned tmp = 0;
+	unsigned wasted = 0;
+
+	if (node->start < start)
+		wasted += start - node->start;
+	if (alignment)
+		tmp = ((node->start + wasted) % alignment);
+
+	if (tmp)
+		wasted += alignment - tmp;
+	if (wasted) {
+		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
+		if (unlikely(align_splitoff == NULL))
+			return NULL;
+	}
+
+	if (node->size == size) {
+		list_del_init(&node->fl_entry);
+		node->free = 0;
+	} else {
+		node = drm_mm_split_at_start(node, size, atomic);
+	}
+
+	if (align_splitoff)
+		drm_mm_put_block(align_splitoff);
+
+	return node;
+}
+EXPORT_SYMBOL(drm_mm_get_block_range_generic);
+
 /*
  * Put a block. Merge with the previous and / or next block if they are free.
  * Otherwise add to the free stack.
@@ -331,6 +369,56 @@
 }
 EXPORT_SYMBOL(drm_mm_search_free);
 
+struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long start,
+						unsigned long end,
+						int best_match)
+{
+	struct list_head *list;
+	const struct list_head *free_stack = &mm->fl_entry;
+	struct drm_mm_node *entry;
+	struct drm_mm_node *best;
+	unsigned long best_size;
+	unsigned wasted;
+
+	best = NULL;
+	best_size = ~0UL;
+
+	list_for_each(list, free_stack) {
+		entry = list_entry(list, struct drm_mm_node, fl_entry);
+		wasted = 0;
+
+		if (entry->size < size)
+			continue;
+
+		if (entry->start > end || (entry->start+entry->size) < start)
+			continue;
+
+		if (entry->start < start)
+			wasted += start - entry->start;
+
+		if (alignment) {
+			register unsigned tmp = (entry->start + wasted) % alignment;
+			if (tmp)
+				wasted += alignment - tmp;
+		}
+
+		if (entry->size >= size + wasted) {
+			if (!best_match)
+				return entry;
+			if (entry->size < best_size) {
+				best = entry;
+				best_size = entry->size;
+			}
+		}
+	}
+
+	return best;
+}
+EXPORT_SYMBOL(drm_mm_search_free_in_range);
+
 int drm_mm_clean(struct drm_mm * mm)
 {
 	struct list_head *head = &mm->ml_entry;
@@ -381,6 +469,26 @@
 }
 EXPORT_SYMBOL(drm_mm_takedown);
 
+void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
+{
+	struct drm_mm_node *entry;
+	int total_used = 0, total_free = 0, total = 0;
+
+	list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
+		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
+			prefix, entry->start, entry->start + entry->size,
+			entry->size, entry->free ? "free" : "used");
+		total += entry->size;
+		if (entry->free)
+			total_free += entry->size;
+		else
+			total_used += entry->size;
+	}
+	printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
+		total_used, total_free);
+}
+EXPORT_SYMBOL(drm_mm_debug_table);
+
 #if defined(CONFIG_DEBUG_FS)
 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
 {
@@ -395,7 +503,7 @@
 		else
 			total_used += entry->size;
 	}
-	seq_printf(m, "total: %d, used %d free %d\n", total, total_free, total_used);
+	seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free);
 	return 0;
 }
 EXPORT_SYMBOL(drm_mm_dump_table);
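
Taken together, the two new range helpers split a range-restricted
allocation into a search step and a claim step.  A sketch of a caller
(hypothetical function, kernel context assumed, using only the
signatures added above):

    static struct drm_mm_node *alloc_in_range(struct drm_mm *mm,
                                              unsigned long size,
                                              unsigned alignment,
                                              unsigned long start,
                                              unsigned long end)
    {
            struct drm_mm_node *node;

            /* find a free block able to hold size bytes inside the range */
            node = drm_mm_search_free_in_range(mm, size, alignment,
                                               start, end, 0);
            if (!node)
                    return NULL;

            /* trim it to exactly the requested size and mark it used */
            return drm_mm_get_block_range_generic(node, size, alignment,
                                                  start, end, 0);
    }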
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 51f6772..6d81a02 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -553,6 +553,32 @@
 }
 EXPORT_SYMBOL(drm_mode_height);
 
+/**
+ * drm_mode_hsync - get the hsync of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's hsync rate in kHz, rounded to the nearest integer.
+ */
+int drm_mode_hsync(struct drm_display_mode *mode)
+{
+	unsigned int calc_val;
+
+	if (mode->hsync)
+		return mode->hsync;
+
+	if (mode->htotal <= 0)
+		return 0;
+
+	calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
+	calc_val += 500;				/* round to 1000Hz */
+	calc_val /= 1000;				/* truncate to kHz */
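+	/* e.g. a 148500 kHz pixel clock with htotal 2200 gives 67500 Hz,
+	 * which is returned as 68 (kHz) after rounding */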
+
+	return calc_val;
+}
+EXPORT_SYMBOL(drm_mode_hsync);
+
 /**
  * drm_mode_vrefresh - get the vrefresh of a mode
  * @mode: mode
@@ -560,7 +586,7 @@
  * LOCKING:
  * None.
  *
- * Return @mode's vrefresh rate or calculate it if necessary.
+ * Return @mode's vrefresh rate in Hz or calculate it if necessary.
  *
  * FIXME: why is this needed?  shouldn't vrefresh be set already?
  *
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 55bb8a8..ad73e14 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -128,6 +128,7 @@
 	kref_get(&master->refcount);
 	return master;
 }
+EXPORT_SYMBOL(drm_master_get);
 
 static void drm_master_destroy(struct kref *kref)
 {
@@ -170,10 +171,13 @@
 	kref_put(&(*master)->refcount, drm_master_destroy);
 	*master = NULL;
 }
+EXPORT_SYMBOL(drm_master_put);
 
 int drm_setmaster_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv)
 {
+	int ret = 0;
+
 	if (file_priv->is_master)
 		return 0;
 
@@ -188,6 +192,13 @@
 		mutex_lock(&dev->struct_mutex);
 		file_priv->minor->master = drm_master_get(file_priv->master);
 		file_priv->is_master = 1;
+		if (dev->driver->master_set) {
+			ret = dev->driver->master_set(dev, file_priv, false);
+			if (unlikely(ret != 0)) {
+				file_priv->is_master = 0;
+				drm_master_put(&file_priv->minor->master);
+			}
+		}
 		mutex_unlock(&dev->struct_mutex);
 	}
 
@@ -204,6 +215,8 @@
 		return -EINVAL;
 
 	mutex_lock(&dev->struct_mutex);
+	if (dev->driver->master_drop)
+		dev->driver->master_drop(dev, file_priv, false);
 	drm_master_put(&file_priv->minor->master);
 	file_priv->is_master = 0;
 	mutex_unlock(&dev->struct_mutex);
@@ -220,9 +233,11 @@
 	INIT_LIST_HEAD(&dev->ctxlist);
 	INIT_LIST_HEAD(&dev->vmalist);
 	INIT_LIST_HEAD(&dev->maplist);
+	INIT_LIST_HEAD(&dev->vblank_event_list);
 
 	spin_lock_init(&dev->count_lock);
 	spin_lock_init(&dev->drw_lock);
+	spin_lock_init(&dev->event_lock);
 	init_timer(&dev->timer);
 	mutex_init(&dev->struct_mutex);
 	mutex_init(&dev->ctxlist_mutex);
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
new file mode 100644
index 0000000..6d2abaf
--- /dev/null
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -0,0 +1,4 @@
+ccflags-y := -Iinclude/drm
+
+ch7006-y := ch7006_drv.o ch7006_mode.o
+obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
new file mode 100644
index 0000000..9422a74
--- /dev/null
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -0,0 +1,531 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ch7006_priv.h"
+
+/* DRM encoder functions */
+
+static void ch7006_encoder_set_config(struct drm_encoder *encoder,
+				      void *params)
+{
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+
+	priv->params = params;
+}
+
+static void ch7006_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+
+	drm_property_destroy(encoder->dev, priv->scale_property);
+
+	kfree(priv);
+	to_encoder_slave(encoder)->slave_priv = NULL;
+
+	drm_i2c_encoder_destroy(encoder);
+}
+
+static void ch7006_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+	struct ch7006_state *state = &priv->state;
+
+	ch7006_dbg(client, "\n");
+
+	if (mode == priv->last_dpms)
+		return;
+	priv->last_dpms = mode;
+
+	ch7006_setup_power_state(encoder);
+
+	ch7006_load_reg(client, state, CH7006_POWER);
+}
+
+static void ch7006_encoder_save(struct drm_encoder *encoder)
+{
+	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+
+	ch7006_dbg(client, "\n");
+
+	ch7006_state_save(client, &priv->saved_state);
+}
+
+static void ch7006_encoder_restore(struct drm_encoder *encoder)
+{
+	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+
+	ch7006_dbg(client, "\n");
+
+	ch7006_state_load(client, &priv->saved_state);
+}
+
+static bool ch7006_encoder_mode_fixup(struct drm_encoder *encoder,
+				      struct drm_display_mode *mode,
+				      struct drm_display_mode *adjusted_mode)
+{
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+
+	/* The ch7006 is painfully picky with the input timings so no
+	 * custom modes for now... */
+
+	priv->mode = ch7006_lookup_mode(encoder, mode);
+
+	return !!priv->mode;
+}
+
+static int ch7006_encoder_mode_valid(struct drm_encoder *encoder,
+				     struct drm_display_mode *mode)
+{
+	if (ch7006_lookup_mode(encoder, mode))
+		return MODE_OK;
+	else
+		return MODE_BAD;
+}
+
+static void ch7006_encoder_mode_set(struct drm_encoder *encoder,
+				     struct drm_display_mode *drm_mode,
+				     struct drm_display_mode *adjusted_mode)
+{
+	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+	struct ch7006_encoder_params *params = priv->params;
+	struct ch7006_state *state = &priv->state;
+	uint8_t *regs = state->regs;
+	struct ch7006_mode *mode = priv->mode;
+	struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
+	int start_active;
+
+	ch7006_dbg(client, "\n");
+
+	regs[CH7006_DISPMODE] = norm->dispmode | mode->dispmode;
+	regs[CH7006_BWIDTH] = 0;
+	regs[CH7006_INPUT_FORMAT] = bitf(CH7006_INPUT_FORMAT_FORMAT,
+					 params->input_format);
+
+	regs[CH7006_CLKMODE] = CH7006_CLKMODE_SUBC_LOCK
+		| bitf(CH7006_CLKMODE_XCM, params->xcm)
+		| bitf(CH7006_CLKMODE_PCM, params->pcm);
+	if (params->clock_mode)
+		regs[CH7006_CLKMODE] |= CH7006_CLKMODE_MASTER;
+	if (params->clock_edge)
+		regs[CH7006_CLKMODE] |= CH7006_CLKMODE_POS_EDGE;
+
+	start_active = (drm_mode->htotal & ~0x7) - (drm_mode->hsync_start & ~0x7);
+	regs[CH7006_POV] = bitf(CH7006_POV_START_ACTIVE_8, start_active);
+	regs[CH7006_START_ACTIVE] = bitf(CH7006_START_ACTIVE_0, start_active);
+
+	regs[CH7006_INPUT_SYNC] = 0;
+	if (params->sync_direction)
+		regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_OUTPUT;
+	if (params->sync_encoding)
+		regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_EMBEDDED;
+	if (drm_mode->flags & DRM_MODE_FLAG_PVSYNC)
+		regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_PVSYNC;
+	if (drm_mode->flags & DRM_MODE_FLAG_PHSYNC)
+		regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_PHSYNC;
+
+	regs[CH7006_DETECT] = 0;
+	regs[CH7006_BCLKOUT] = 0;
+
+	regs[CH7006_SUBC_INC3] = 0;
+	if (params->pout_level)
+		regs[CH7006_SUBC_INC3] |= CH7006_SUBC_INC3_POUT_3_3V;
+
+	regs[CH7006_SUBC_INC4] = 0;
+	if (params->active_detect)
+		regs[CH7006_SUBC_INC4] |= CH7006_SUBC_INC4_DS_INPUT;
+
+	regs[CH7006_PLL_CONTROL] = priv->saved_state.regs[CH7006_PLL_CONTROL];
+
+	ch7006_setup_levels(encoder);
+	ch7006_setup_subcarrier(encoder);
+	ch7006_setup_pll(encoder);
+	ch7006_setup_power_state(encoder);
+	ch7006_setup_properties(encoder);
+
+	ch7006_state_load(client, state);
+}
+
+static enum drm_connector_status ch7006_encoder_detect(struct drm_encoder *encoder,
+						       struct drm_connector *connector)
+{
+	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+	struct ch7006_state *state = &priv->state;
+	int det;
+
+	ch7006_dbg(client, "\n");
+
+	ch7006_save_reg(client, state, CH7006_DETECT);
+	ch7006_save_reg(client, state, CH7006_POWER);
+	ch7006_save_reg(client, state, CH7006_CLKMODE);
+
+	ch7006_write(client, CH7006_POWER, CH7006_POWER_RESET |
+					   bitfs(CH7006_POWER_LEVEL, NORMAL));
+	ch7006_write(client, CH7006_CLKMODE, CH7006_CLKMODE_MASTER);
+
+	ch7006_write(client, CH7006_DETECT, CH7006_DETECT_SENSE);
+
+	ch7006_write(client, CH7006_DETECT, 0);
+
+	det = ch7006_read(client, CH7006_DETECT);
+
+	ch7006_load_reg(client, state, CH7006_CLKMODE);
+	ch7006_load_reg(client, state, CH7006_POWER);
+	ch7006_load_reg(client, state, CH7006_DETECT);
+
+	if ((det & (CH7006_DETECT_SVIDEO_Y_TEST|
+		    CH7006_DETECT_SVIDEO_C_TEST|
+		    CH7006_DETECT_CVBS_TEST)) == 0)
+		priv->subconnector = DRM_MODE_SUBCONNECTOR_SCART;
+	else if ((det & (CH7006_DETECT_SVIDEO_Y_TEST|
+			 CH7006_DETECT_SVIDEO_C_TEST)) == 0)
+		priv->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO;
+	else if ((det & CH7006_DETECT_CVBS_TEST) == 0)
+		priv->subconnector = DRM_MODE_SUBCONNECTOR_Composite;
+	else
+		priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
+
+	drm_connector_property_set_value(connector,
+			encoder->dev->mode_config.tv_subconnector_property,
+							priv->subconnector);
+
+	return priv->subconnector ? connector_status_connected :
+					connector_status_disconnected;
+}
+
+static int ch7006_encoder_get_modes(struct drm_encoder *encoder,
+				    struct drm_connector *connector)
+{
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+	struct ch7006_mode *mode;
+	int n = 0;
+
+	for (mode = ch7006_modes; mode->mode.clock; mode++) {
+		if (~mode->valid_scales & 1<<priv->scale ||
+		    ~mode->valid_norms & 1<<priv->norm)
+			continue;
+
+		drm_mode_probed_add(connector,
+				drm_mode_duplicate(encoder->dev, &mode->mode));
+
+		n++;
+	}
+
+	return n;
+}
+
+static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
+					   struct drm_connector *connector)
+{
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct drm_mode_config *conf = &dev->mode_config;
+
+	drm_mode_create_tv_properties(dev, NUM_TV_NORMS, ch7006_tv_norm_names);
+
+	priv->scale_property = drm_property_create(dev, DRM_MODE_PROP_RANGE,
+						   "scale", 2);
+	priv->scale_property->values[0] = 0;
+	priv->scale_property->values[1] = 2;
+
+	drm_connector_attach_property(connector, conf->tv_select_subconnector_property,
+				      priv->select_subconnector);
+	drm_connector_attach_property(connector, conf->tv_subconnector_property,
+				      priv->subconnector);
+	drm_connector_attach_property(connector, conf->tv_left_margin_property,
+				      priv->hmargin);
+	drm_connector_attach_property(connector, conf->tv_bottom_margin_property,
+				      priv->vmargin);
+	drm_connector_attach_property(connector, conf->tv_mode_property,
+				      priv->norm);
+	drm_connector_attach_property(connector, conf->tv_brightness_property,
+				      priv->brightness);
+	drm_connector_attach_property(connector, conf->tv_contrast_property,
+				      priv->contrast);
+	drm_connector_attach_property(connector, conf->tv_flicker_reduction_property,
+				      priv->flicker);
+	drm_connector_attach_property(connector, priv->scale_property,
+				      priv->scale);
+
+	return 0;
+}
+
+static int ch7006_encoder_set_property(struct drm_encoder *encoder,
+				       struct drm_connector *connector,
+				       struct drm_property *property,
+				       uint64_t val)
+{
+	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+	struct ch7006_state *state = &priv->state;
+	struct drm_mode_config *conf = &encoder->dev->mode_config;
+	struct drm_crtc *crtc = encoder->crtc;
+	bool modes_changed = false;
+
+	ch7006_dbg(client, "\n");
+
+	if (property == conf->tv_select_subconnector_property) {
+		priv->select_subconnector = val;
+
+		ch7006_setup_power_state(encoder);
+
+		ch7006_load_reg(client, state, CH7006_POWER);
+
+	} else if (property == conf->tv_left_margin_property) {
+		priv->hmargin = val;
+
+		ch7006_setup_properties(encoder);
+
+		ch7006_load_reg(client, state, CH7006_POV);
+		ch7006_load_reg(client, state, CH7006_HPOS);
+
+	} else if (property == conf->tv_bottom_margin_property) {
+		priv->vmargin = val;
+
+		ch7006_setup_properties(encoder);
+
+		ch7006_load_reg(client, state, CH7006_POV);
+		ch7006_load_reg(client, state, CH7006_VPOS);
+
+	} else if (property == conf->tv_mode_property) {
+		if (connector->dpms != DRM_MODE_DPMS_OFF)
+			return -EINVAL;
+
+		priv->norm = val;
+
+		modes_changed = true;
+
+	} else if (property == conf->tv_brightness_property) {
+		priv->brightness = val;
+
+		ch7006_setup_levels(encoder);
+
+		ch7006_load_reg(client, state, CH7006_BLACK_LEVEL);
+
+	} else if (property == conf->tv_contrast_property) {
+		priv->contrast = val;
+
+		ch7006_setup_properties(encoder);
+
+		ch7006_load_reg(client, state, CH7006_CONTRAST);
+
+	} else if (property == conf->tv_flicker_reduction_property) {
+		priv->flicker = val;
+
+		ch7006_setup_properties(encoder);
+
+		ch7006_load_reg(client, state, CH7006_FFILTER);
+
+	} else if (property == priv->scale_property) {
+		if (connector->dpms != DRM_MODE_DPMS_OFF)
+			return -EINVAL;
+
+		priv->scale = val;
+
+		modes_changed = true;
+
+	} else {
+		return -EINVAL;
+	}
+
+	if (modes_changed) {
+		drm_helper_probe_single_connector_modes(connector, 0, 0);
+
+		/* Disable the crtc to ensure a full modeset is
+		 * performed whenever it's turned on again. */
+		if (crtc) {
+			struct drm_mode_set modeset = {
+				.crtc = crtc,
+			};
+
+			crtc->funcs->set_config(&modeset);
+		}
+	}
+
+	return 0;
+}
+
+static struct drm_encoder_slave_funcs ch7006_encoder_funcs = {
+	.set_config = ch7006_encoder_set_config,
+	.destroy = ch7006_encoder_destroy,
+	.dpms = ch7006_encoder_dpms,
+	.save = ch7006_encoder_save,
+	.restore = ch7006_encoder_restore,
+	.mode_fixup = ch7006_encoder_mode_fixup,
+	.mode_valid = ch7006_encoder_mode_valid,
+	.mode_set = ch7006_encoder_mode_set,
+	.detect = ch7006_encoder_detect,
+	.get_modes = ch7006_encoder_get_modes,
+	.create_resources = ch7006_encoder_create_resources,
+	.set_property = ch7006_encoder_set_property,
+};
+
+
+/* I2C driver functions */
+
+static int ch7006_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+	uint8_t addr = CH7006_VERSION_ID;
+	uint8_t val;
+	int ret;
+
+	ch7006_dbg(client, "\n");
+
+	ret = i2c_master_send(client, &addr, sizeof(addr));
+	if (ret < 0)
+		goto fail;
+
+	ret = i2c_master_recv(client, &val, sizeof(val));
+	if (ret < 0)
+		goto fail;
+
+	ch7006_info(client, "Detected version ID: %x\n", val);
+
+	return 0;
+
+fail:
+	ch7006_err(client, "Error %d reading version ID\n", ret);
+
+	return -ENODEV;
+}
+
+static int ch7006_remove(struct i2c_client *client)
+{
+	ch7006_dbg(client, "\n");
+
+	return 0;
+}
+
+static int ch7006_encoder_init(struct i2c_client *client,
+			       struct drm_device *dev,
+			       struct drm_encoder_slave *encoder)
+{
+	struct ch7006_priv *priv;
+	int i;
+
+	ch7006_dbg(client, "\n");
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	encoder->slave_priv = priv;
+	encoder->slave_funcs = &ch7006_encoder_funcs;
+
+	priv->norm = TV_NORM_PAL;
+	priv->select_subconnector = DRM_MODE_SUBCONNECTOR_Automatic;
+	priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
+	priv->scale = 1;
+	priv->contrast = 50;
+	priv->brightness = 50;
+	priv->flicker = 50;
+	priv->hmargin = 50;
+	priv->vmargin = 50;
+	priv->last_dpms = -1;
+
+	if (ch7006_tv_norm) {
+		for (i = 0; i < NUM_TV_NORMS; i++) {
+			if (!strcmp(ch7006_tv_norm_names[i], ch7006_tv_norm)) {
+				priv->norm = i;
+				break;
+			}
+		}
+
+		if (i == NUM_TV_NORMS)
+			ch7006_err(client, "Invalid TV norm setting \"%s\".\n",
+				   ch7006_tv_norm);
+	}
+
+	if (ch7006_scale >= 0 && ch7006_scale <= 2)
+		priv->scale = ch7006_scale;
+	else
+		ch7006_err(client, "Invalid scale setting \"%d\".\n",
+			   ch7006_scale);
+
+	return 0;
+}
+
+static struct i2c_device_id ch7006_ids[] = {
+	{ "ch7006", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, ch7006_ids);
+
+static struct drm_i2c_encoder_driver ch7006_driver = {
+	.i2c_driver = {
+		.probe = ch7006_probe,
+		.remove = ch7006_remove,
+
+		.driver = {
+			.name = "ch7006",
+		},
+
+		.id_table = ch7006_ids,
+	},
+
+	.encoder_init = ch7006_encoder_init,
+};
+
+
+/* Module initialization */
+
+static int __init ch7006_init(void)
+{
+	return drm_i2c_encoder_register(THIS_MODULE, &ch7006_driver);
+}
+
+static void __exit ch7006_exit(void)
+{
+	drm_i2c_encoder_unregister(&ch7006_driver);
+}
+
+int ch7006_debug;
+module_param_named(debug, ch7006_debug, int, 0600);
+MODULE_PARM_DESC(debug, "Enable debug output.");
+
+char *ch7006_tv_norm;
+module_param_named(tv_norm, ch7006_tv_norm, charp, 0600);
+MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
+		 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, PAL-60, NTSC-M, NTSC-J.\n"
+		 "\t\tDefault: PAL");
+
+int ch7006_scale = 1;
+module_param_named(scale, ch7006_scale, int, 0600);
+MODULE_PARM_DESC(scale, "Default scale.\n"
+		 "\t\tSupported: 0 -> Select video modes with a higher blanking ratio.\n"
+		 "\t\t\t1 -> Select default video modes.\n"
+		 "\t\t\t2 -> Select video modes with a lower blanking ratio.");
+
+MODULE_AUTHOR("Francisco Jerez <currojerez@riseup.net>");
+MODULE_DESCRIPTION("Chrontel ch7006 TV encoder driver");
+MODULE_LICENSE("GPL and additional rights");
+
+module_init(ch7006_init);
+module_exit(ch7006_exit);
diff --git a/drivers/gpu/drm/i2c/ch7006_mode.c b/drivers/gpu/drm/i2c/ch7006_mode.c
new file mode 100644
index 0000000..87f5445
--- /dev/null
+++ b/drivers/gpu/drm/i2c/ch7006_mode.c
@@ -0,0 +1,473 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ch7006_priv.h"
+
+char *ch7006_tv_norm_names[] = {
+	[TV_NORM_PAL] = "PAL",
+	[TV_NORM_PAL_M] = "PAL-M",
+	[TV_NORM_PAL_N] = "PAL-N",
+	[TV_NORM_PAL_NC] = "PAL-Nc",
+	[TV_NORM_PAL_60] = "PAL-60",
+	[TV_NORM_NTSC_M] = "NTSC-M",
+	[TV_NORM_NTSC_J] = "NTSC-J",
+};
+
+#define NTSC_LIKE_TIMINGS .vrefresh = 60 * fixed1/1.001,		\
+		.vdisplay = 480,					\
+		.vtotal = 525,						\
+		.hvirtual = 660
+
+#define PAL_LIKE_TIMINGS .vrefresh = 50 * fixed1,		\
+		.vdisplay = 576,				\
+		.vtotal = 625,					\
+		.hvirtual = 810
+
+struct ch7006_tv_norm_info ch7006_tv_norms[] = {
+	[TV_NORM_NTSC_M] = {
+		NTSC_LIKE_TIMINGS,
+		.black_level = 0.339 * fixed1,
+		.subc_freq = 3579545 * fixed1,
+		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, NTSC),
+		.voffset = 0,
+	},
+	[TV_NORM_NTSC_J] = {
+		NTSC_LIKE_TIMINGS,
+		.black_level = 0.286 * fixed1,
+		.subc_freq = 3579545 * fixed1,
+		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, NTSC_J),
+		.voffset = 0,
+	},
+	[TV_NORM_PAL] = {
+		PAL_LIKE_TIMINGS,
+		.black_level = 0.3 * fixed1,
+		.subc_freq = 4433618.75 * fixed1,
+		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL),
+		.voffset = 0,
+	},
+	[TV_NORM_PAL_M] = {
+		NTSC_LIKE_TIMINGS,
+		.black_level = 0.339 * fixed1,
+		.subc_freq = 3575611.433 * fixed1,
+		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL_M),
+		.voffset = 16,
+	},
+
+	/* The following modes seem to work right but they're
+	 * undocumented */
+
+	[TV_NORM_PAL_N] = {
+		PAL_LIKE_TIMINGS,
+		.black_level = 0.339 * fixed1,
+		.subc_freq = 4433618.75 * fixed1,
+		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL),
+		.voffset = 0,
+	},
+	[TV_NORM_PAL_NC] = {
+		PAL_LIKE_TIMINGS,
+		.black_level = 0.3 * fixed1,
+		.subc_freq = 3582056.25 * fixed1,
+		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL),
+		.voffset = 0,
+	},
+	[TV_NORM_PAL_60] = {
+		NTSC_LIKE_TIMINGS,
+		.black_level = 0.3 * fixed1,
+		.subc_freq = 4433618.75 * fixed1,
+		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL_M),
+		.voffset = 16,
+	},
+};
+
+#define __MODE(f, hd, vd, ht, vt, hsynp, vsynp,				\
+	       subc, scale, scale_mask, norm_mask, e_hd, e_vd) {	\
+		.mode = {						\
+			.name = #hd "x" #vd,				\
+			.status = 0,					\
+			.type = DRM_MODE_TYPE_DRIVER,			\
+			.clock = f,					\
+			.hdisplay = hd,					\
+			.hsync_start = e_hd + 16,			\
+			.hsync_end = e_hd + 80,				\
+			.htotal = ht,					\
+			.hskew = 0,					\
+			.vdisplay = vd,					\
+			.vsync_start = vd + 10,				\
+			.vsync_end = vd + 26,				\
+			.vtotal = vt,					\
+			.vscan = 0,					\
+			.flags = DRM_MODE_FLAG_##hsynp##HSYNC |		\
+				DRM_MODE_FLAG_##vsynp##VSYNC,		\
+			.vrefresh = 0,					\
+		},							\
+		.enc_hdisp = e_hd,					\
+		.enc_vdisp = e_vd,					\
+		.subc_coeff = subc * fixed1,				\
+		.dispmode = bitfs(CH7006_DISPMODE_SCALING_RATIO, scale) | \
+			    bitfs(CH7006_DISPMODE_INPUT_RES, e_hd##x##e_vd), \
+		.valid_scales = scale_mask,				\
+		.valid_norms = norm_mask				\
+	 }
+
+#define MODE(f, hd, vd, ht, vt, hsynp, vsynp,				\
+	     subc, scale, scale_mask, norm_mask)			\
+	__MODE(f, hd, vd, ht, vt, hsynp, vsynp, subc, scale,		\
+	       scale_mask, norm_mask, hd, vd)
+
+#define NTSC_LIKE (1 << TV_NORM_NTSC_M | 1 << TV_NORM_NTSC_J |		\
+		   1 << TV_NORM_PAL_M | 1 << TV_NORM_PAL_60)
+
+#define PAL_LIKE (1 << TV_NORM_PAL | 1 << TV_NORM_PAL_N | 1 << TV_NORM_PAL_NC)
+
+struct ch7006_mode ch7006_modes[] = {
+	MODE(21000, 512, 384, 840, 500, N, N, 181.797557582, 5_4, 0x6, PAL_LIKE),
+	MODE(26250, 512, 384, 840, 625, N, N, 145.438046066, 1_1, 0x1, PAL_LIKE),
+	MODE(20140, 512, 384, 800, 420, N, N, 213.257083791, 5_4, 0x4, NTSC_LIKE),
+	MODE(24671, 512, 384, 784, 525, N, N, 174.0874153, 1_1, 0x3, NTSC_LIKE),
+	MODE(28125, 720, 400, 1125, 500, N, N, 135.742176298, 5_4, 0x6, PAL_LIKE),
+	MODE(34875, 720, 400, 1116, 625, N, N, 109.469496898, 1_1, 0x1, PAL_LIKE),
+	MODE(23790, 720, 400, 945, 420, N, N, 160.475642016, 5_4, 0x4, NTSC_LIKE),
+	MODE(29455, 720, 400, 936, 525, N, N, 129.614941843, 1_1, 0x3, NTSC_LIKE),
+	MODE(25000, 640, 400, 1000, 500, N, N, 152.709948279, 5_4, 0x6, PAL_LIKE),
+	MODE(31500, 640, 400, 1008, 625, N, N, 121.198371646, 1_1, 0x1, PAL_LIKE),
+	MODE(21147, 640, 400, 840, 420, N, N, 180.535097338, 5_4, 0x4, NTSC_LIKE),
+	MODE(26434, 640, 400, 840, 525, N, N, 144.42807787, 1_1, 0x2, NTSC_LIKE),
+	MODE(30210, 640, 400, 840, 600, N, N, 126.374568276, 7_8, 0x1, NTSC_LIKE),
+	MODE(21000, 640, 480, 840, 500, N, N, 181.797557582, 5_4, 0x4, PAL_LIKE),
+	MODE(26250, 640, 480, 840, 625, N, N, 145.438046066, 1_1, 0x2, PAL_LIKE),
+	MODE(31500, 640, 480, 840, 750, N, N, 121.198371646, 5_6, 0x1, PAL_LIKE),
+	MODE(24671, 640, 480, 784, 525, N, N, 174.0874153, 1_1, 0x4, NTSC_LIKE),
+	MODE(28196, 640, 480, 784, 600, N, N, 152.326488422, 7_8, 0x2, NTSC_LIKE),
+	MODE(30210, 640, 480, 800, 630, N, N, 142.171389101, 5_6, 0x1, NTSC_LIKE),
+	__MODE(29500, 720, 576, 944, 625, P, P, 145.592111636, 1_1, 0x7, PAL_LIKE, 800, 600),
+	MODE(36000, 800, 600, 960, 750, P, P, 119.304647022, 5_6, 0x6, PAL_LIKE),
+	MODE(39000, 800, 600, 936, 836, P, P, 110.127366499, 3_4, 0x1, PAL_LIKE),
+	MODE(39273, 800, 600, 1040, 630, P, P, 145.816809399, 5_6, 0x4, NTSC_LIKE),
+	MODE(43636, 800, 600, 1040, 700, P, P, 131.235128487, 3_4, 0x2, NTSC_LIKE),
+	MODE(47832, 800, 600, 1064, 750, P, P, 119.723275165, 7_10, 0x1, NTSC_LIKE),
+	{}
+};
+
+struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
+				       struct drm_display_mode *drm_mode)
+{
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+	struct ch7006_mode *mode;
+
+	for (mode = ch7006_modes; mode->mode.clock; mode++) {
+
+		if (~mode->valid_norms & 1<<priv->norm)
+			continue;
+
+		if (mode->mode.hdisplay != drm_mode->hdisplay ||
+		    mode->mode.vdisplay != drm_mode->vdisplay ||
+		    mode->mode.vtotal != drm_mode->vtotal ||
+		    mode->mode.htotal != drm_mode->htotal ||
+		    mode->mode.clock != drm_mode->clock)
+			continue;
+
+		return mode;
+	}
+
+	return NULL;
+}
+
+/* Some common HW state calculation code */
+
+void ch7006_setup_levels(struct drm_encoder *encoder)
+{
+	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+	uint8_t *regs = priv->state.regs;
+	struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
+	int gain;
+	int black_level;
+
+	/* Set DAC_GAIN if the voltage drop between white and black is
+	 * high enough. */
+	if (norm->black_level < 339*fixed1/1000) {
+		gain = 76;
+
+		regs[CH7006_INPUT_FORMAT] |= CH7006_INPUT_FORMAT_DAC_GAIN;
+	} else {
+		gain = 71;
+
+		regs[CH7006_INPUT_FORMAT] &= ~CH7006_INPUT_FORMAT_DAC_GAIN;
+	}
+
+	black_level = round_fixed(norm->black_level*26625)/gain;
+
+	/* Correct it with the specified brightness. */
+	black_level = interpolate(90, black_level, 208, priv->brightness);
+
+	regs[CH7006_BLACK_LEVEL] = bitf(CH7006_BLACK_LEVEL_0, black_level);
+
+	ch7006_dbg(client, "black level: %d\n", black_level);
+}
+
+void ch7006_setup_subcarrier(struct drm_encoder *encoder)
+{
+	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+	struct ch7006_state *state = &priv->state;
+	struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
+	struct ch7006_mode *mode = priv->mode;
+	uint32_t subc_inc;
+
+	subc_inc = round_fixed((mode->subc_coeff >> 8)
+			       * (norm->subc_freq >> 24));
+
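+	/* the eight 4-bit SUBC_INC fields below concatenate into a single
+	 * 32-bit subcarrier phase increment, most significant nibble
+	 * first */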
+	setbitf(state, CH7006_SUBC_INC0, 28, subc_inc);
+	setbitf(state, CH7006_SUBC_INC1, 24, subc_inc);
+	setbitf(state, CH7006_SUBC_INC2, 20, subc_inc);
+	setbitf(state, CH7006_SUBC_INC3, 16, subc_inc);
+	setbitf(state, CH7006_SUBC_INC4, 12, subc_inc);
+	setbitf(state, CH7006_SUBC_INC5, 8, subc_inc);
+	setbitf(state, CH7006_SUBC_INC6, 4, subc_inc);
+	setbitf(state, CH7006_SUBC_INC7, 0, subc_inc);
+
+	ch7006_dbg(client, "subcarrier inc: %u\n", subc_inc);
+}
+
+void ch7006_setup_pll(struct drm_encoder *encoder)
+{
+	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+	uint8_t *regs = priv->state.regs;
+	struct ch7006_mode *mode = priv->mode;
+	int n, best_n = 0;
+	int m, best_m = 0;
+	int freq, best_freq = 0;
+
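+	/* brute-force the PLL dividers: the synthesized clock is
+	 * CH7006_FREQ0 * (n + 2) / (m + 2) kHz; keep the pair closest
+	 * to the requested pixel clock */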
+	for (n = 0; n < CH7006_MAXN; n++) {
+		for (m = 0; m < CH7006_MAXM; m++) {
+			freq = CH7006_FREQ0*(n+2)/(m+2);
+
+			if (abs(freq - mode->mode.clock) <
+			    abs(best_freq - mode->mode.clock)) {
+				best_freq = freq;
+				best_n = n;
+				best_m = m;
+			}
+		}
+	}
+
+	regs[CH7006_PLLOV] = bitf(CH7006_PLLOV_N_8, best_n) |
+		bitf(CH7006_PLLOV_M_8, best_m);
+
+	regs[CH7006_PLLM] = bitf(CH7006_PLLM_0, best_m);
+	regs[CH7006_PLLN] = bitf(CH7006_PLLN_0, best_n);
+
+	if (best_n < 108)
+		regs[CH7006_PLL_CONTROL] |= CH7006_PLL_CONTROL_CAPACITOR;
+	else
+		regs[CH7006_PLL_CONTROL] &= ~CH7006_PLL_CONTROL_CAPACITOR;
+
+	ch7006_dbg(client, "n=%d m=%d f=%d c=%d\n",
+		   best_n, best_m, best_freq, best_n < 108);
+}
+
+void ch7006_setup_power_state(struct drm_encoder *encoder)
+{
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+	uint8_t *power = &priv->state.regs[CH7006_POWER];
+	int subconnector;
+
+	subconnector = priv->select_subconnector ? priv->select_subconnector :
+							priv->subconnector;
+
+	*power = CH7006_POWER_RESET;
+
+	if (priv->last_dpms == DRM_MODE_DPMS_ON) {
+		switch (subconnector) {
+		case DRM_MODE_SUBCONNECTOR_SVIDEO:
+			*power |= bitfs(CH7006_POWER_LEVEL, CVBS_OFF);
+			break;
+		case DRM_MODE_SUBCONNECTOR_Composite:
+			*power |= bitfs(CH7006_POWER_LEVEL, SVIDEO_OFF);
+			break;
+		case DRM_MODE_SUBCONNECTOR_SCART:
+			*power |= bitfs(CH7006_POWER_LEVEL, NORMAL) |
+				CH7006_POWER_SCART;
+			break;
+		}
+
+	} else {
+		*power |= bitfs(CH7006_POWER_LEVEL, FULL_POWER_OFF);
+	}
+}
+
+void ch7006_setup_properties(struct drm_encoder *encoder)
+{
+	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+	struct ch7006_state *state = &priv->state;
+	struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
+	struct ch7006_mode *ch_mode = priv->mode;
+	struct drm_display_mode *mode = &ch_mode->mode;
+	uint8_t *regs = state->regs;
+	int flicker, contrast, hpos, vpos;
+	uint64_t scale, aspect;
+
+	flicker = interpolate(0, 2, 3, priv->flicker);
+	regs[CH7006_FFILTER] = bitf(CH7006_FFILTER_TEXT, flicker) |
+		bitf(CH7006_FFILTER_LUMA, flicker) |
+		bitf(CH7006_FFILTER_CHROMA, 1);
+
+	contrast = interpolate(0, 5, 7, priv->contrast);
+	regs[CH7006_CONTRAST] = bitf(CH7006_CONTRAST_0, contrast);
+
+	scale = norm->vtotal*fixed1;
+	do_div(scale, mode->vtotal);
+
+	aspect = ch_mode->enc_hdisp*fixed1;
+	do_div(aspect, ch_mode->enc_vdisp);
+
+	hpos = round_fixed((norm->hvirtual * aspect - mode->hdisplay * scale)
+			   * priv->hmargin * mode->vtotal) / norm->vtotal / 100 / 4;
+
+	setbitf(state, CH7006_POV, HPOS_8, hpos);
+	setbitf(state, CH7006_HPOS, 0, hpos);
+
+	vpos = max(0, norm->vdisplay - round_fixed(mode->vdisplay*scale)
+		   + norm->voffset) * priv->vmargin / 100 / 2;
+
+	setbitf(state, CH7006_POV, VPOS_8, vpos);
+	setbitf(state, CH7006_VPOS, 0, vpos);
+
+	ch7006_dbg(client, "hpos: %d, vpos: %d\n", hpos, vpos);
+}
+
+/* HW access functions */
+
+void ch7006_write(struct i2c_client *client, uint8_t addr, uint8_t val)
+{
+	uint8_t buf[] = {addr, val};
+	int ret;
+
+	ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+	if (ret < 0)
+		ch7006_err(client, "Error %d writing to subaddress 0x%x\n",
+			   ret, addr);
+}
+
+uint8_t ch7006_read(struct i2c_client *client, uint8_t addr)
+{
+	uint8_t val;
+	int ret;
+
+	ret = i2c_master_send(client, &addr, sizeof(addr));
+	if (ret < 0)
+		goto fail;
+
+	ret = i2c_master_recv(client, &val, sizeof(val));
+	if (ret < 0)
+		goto fail;
+
+	return val;
+
+fail:
+	ch7006_err(client, "Error %d reading from subaddress 0x%x\n",
+		   ret, addr);
+	return 0;
+}
+
+void ch7006_state_load(struct i2c_client *client,
+		       struct ch7006_state *state)
+{
+	ch7006_load_reg(client, state, CH7006_POWER);
+
+	ch7006_load_reg(client, state, CH7006_DISPMODE);
+	ch7006_load_reg(client, state, CH7006_FFILTER);
+	ch7006_load_reg(client, state, CH7006_BWIDTH);
+	ch7006_load_reg(client, state, CH7006_INPUT_FORMAT);
+	ch7006_load_reg(client, state, CH7006_CLKMODE);
+	ch7006_load_reg(client, state, CH7006_START_ACTIVE);
+	ch7006_load_reg(client, state, CH7006_POV);
+	ch7006_load_reg(client, state, CH7006_BLACK_LEVEL);
+	ch7006_load_reg(client, state, CH7006_HPOS);
+	ch7006_load_reg(client, state, CH7006_VPOS);
+	ch7006_load_reg(client, state, CH7006_INPUT_SYNC);
+	ch7006_load_reg(client, state, CH7006_DETECT);
+	ch7006_load_reg(client, state, CH7006_CONTRAST);
+	ch7006_load_reg(client, state, CH7006_PLLOV);
+	ch7006_load_reg(client, state, CH7006_PLLM);
+	ch7006_load_reg(client, state, CH7006_PLLN);
+	ch7006_load_reg(client, state, CH7006_BCLKOUT);
+	ch7006_load_reg(client, state, CH7006_SUBC_INC0);
+	ch7006_load_reg(client, state, CH7006_SUBC_INC1);
+	ch7006_load_reg(client, state, CH7006_SUBC_INC2);
+	ch7006_load_reg(client, state, CH7006_SUBC_INC3);
+	ch7006_load_reg(client, state, CH7006_SUBC_INC4);
+	ch7006_load_reg(client, state, CH7006_SUBC_INC5);
+	ch7006_load_reg(client, state, CH7006_SUBC_INC6);
+	ch7006_load_reg(client, state, CH7006_SUBC_INC7);
+	ch7006_load_reg(client, state, CH7006_PLL_CONTROL);
+	ch7006_load_reg(client, state, CH7006_CALC_SUBC_INC0);
+
+	/* I don't know what this is for, but otherwise I get no
+	 * signal.
+	 */
+	ch7006_write(client, 0x3d, 0x0);
+}
+
+void ch7006_state_save(struct i2c_client *client,
+		       struct ch7006_state *state)
+{
+	ch7006_save_reg(client, state, CH7006_POWER);
+
+	ch7006_save_reg(client, state, CH7006_DISPMODE);
+	ch7006_save_reg(client, state, CH7006_FFILTER);
+	ch7006_save_reg(client, state, CH7006_BWIDTH);
+	ch7006_save_reg(client, state, CH7006_INPUT_FORMAT);
+	ch7006_save_reg(client, state, CH7006_CLKMODE);
+	ch7006_save_reg(client, state, CH7006_START_ACTIVE);
+	ch7006_save_reg(client, state, CH7006_POV);
+	ch7006_save_reg(client, state, CH7006_BLACK_LEVEL);
+	ch7006_save_reg(client, state, CH7006_HPOS);
+	ch7006_save_reg(client, state, CH7006_VPOS);
+	ch7006_save_reg(client, state, CH7006_INPUT_SYNC);
+	ch7006_save_reg(client, state, CH7006_DETECT);
+	ch7006_save_reg(client, state, CH7006_CONTRAST);
+	ch7006_save_reg(client, state, CH7006_PLLOV);
+	ch7006_save_reg(client, state, CH7006_PLLM);
+	ch7006_save_reg(client, state, CH7006_PLLN);
+	ch7006_save_reg(client, state, CH7006_BCLKOUT);
+	ch7006_save_reg(client, state, CH7006_SUBC_INC0);
+	ch7006_save_reg(client, state, CH7006_SUBC_INC1);
+	ch7006_save_reg(client, state, CH7006_SUBC_INC2);
+	ch7006_save_reg(client, state, CH7006_SUBC_INC3);
+	ch7006_save_reg(client, state, CH7006_SUBC_INC4);
+	ch7006_save_reg(client, state, CH7006_SUBC_INC5);
+	ch7006_save_reg(client, state, CH7006_SUBC_INC6);
+	ch7006_save_reg(client, state, CH7006_SUBC_INC7);
+	ch7006_save_reg(client, state, CH7006_PLL_CONTROL);
+	ch7006_save_reg(client, state, CH7006_CALC_SUBC_INC0);
+
+	state->regs[CH7006_FFILTER] = (state->regs[CH7006_FFILTER] & 0xf0) |
+		(state->regs[CH7006_FFILTER] & 0x0c) >> 2 |
+		(state->regs[CH7006_FFILTER] & 0x03) << 2;
+}
diff --git a/drivers/gpu/drm/i2c/ch7006_priv.h b/drivers/gpu/drm/i2c/ch7006_priv.h
new file mode 100644
index 0000000..b06d3d9
--- /dev/null
+++ b/drivers/gpu/drm/i2c/ch7006_priv.h
@@ -0,0 +1,344 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __DRM_I2C_CH7006_PRIV_H__
+#define __DRM_I2C_CH7006_PRIV_H__
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+#include "drm_encoder_slave.h"
+#include "i2c/ch7006.h"
+
+typedef int64_t fixed;
+#define fixed1 (1LL << 32)
+
+enum ch7006_tv_norm {
+	TV_NORM_PAL,
+	TV_NORM_PAL_M,
+	TV_NORM_PAL_N,
+	TV_NORM_PAL_NC,
+	TV_NORM_PAL_60,
+	TV_NORM_NTSC_M,
+	TV_NORM_NTSC_J,
+	NUM_TV_NORMS
+};
+
+struct ch7006_tv_norm_info {
+	fixed vrefresh;
+	int vdisplay;
+	int vtotal;
+	int hvirtual;
+
+	fixed subc_freq;
+	fixed black_level;
+
+	uint32_t dispmode;
+	int voffset;
+};
+
+struct ch7006_mode {
+	struct drm_display_mode mode;
+
+	int enc_hdisp;
+	int enc_vdisp;
+
+	fixed subc_coeff;
+	uint32_t dispmode;
+
+	uint32_t valid_scales;
+	uint32_t valid_norms;
+};
+
+struct ch7006_state {
+	uint8_t regs[0x26];
+};
+
+struct ch7006_priv {
+	struct ch7006_encoder_params *params;
+	struct ch7006_mode *mode;
+
+	struct ch7006_state state;
+	struct ch7006_state saved_state;
+
+	struct drm_property *scale_property;
+
+	int select_subconnector;
+	int subconnector;
+	int hmargin;
+	int vmargin;
+	enum ch7006_tv_norm norm;
+	int brightness;
+	int contrast;
+	int flicker;
+	int scale;
+
+	int last_dpms;
+};
+
+#define to_ch7006_priv(x) \
+	((struct ch7006_priv *)to_encoder_slave(x)->slave_priv)
+
+extern int ch7006_debug;
+extern char *ch7006_tv_norm;
+extern int ch7006_scale;
+
+extern char *ch7006_tv_norm_names[];
+extern struct ch7006_tv_norm_info ch7006_tv_norms[];
+extern struct ch7006_mode ch7006_modes[];
+
+struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
+				       struct drm_display_mode *drm_mode);
+
+void ch7006_setup_levels(struct drm_encoder *encoder);
+void ch7006_setup_subcarrier(struct drm_encoder *encoder);
+void ch7006_setup_pll(struct drm_encoder *encoder);
+void ch7006_setup_power_state(struct drm_encoder *encoder);
+void ch7006_setup_properties(struct drm_encoder *encoder);
+
+void ch7006_write(struct i2c_client *client, uint8_t addr, uint8_t val);
+uint8_t ch7006_read(struct i2c_client *client, uint8_t addr);
+
+void ch7006_state_load(struct i2c_client *client,
+		       struct ch7006_state *state);
+void ch7006_state_save(struct i2c_client *client,
+		       struct ch7006_state *state);
+
+/* Some helper macros */
+
+#define ch7006_dbg(client, format, ...) do {				\
+		if (ch7006_debug)					\
+			dev_printk(KERN_DEBUG, &client->dev,		\
+				   "%s: " format, __func__, ## __VA_ARGS__); \
+	} while (0)
+#define ch7006_info(client, format, ...) \
+				dev_info(&client->dev, format, __VA_ARGS__)
+#define ch7006_err(client, format, ...) \
+				dev_err(&client->dev, format, __VA_ARGS__)
+
+#define __mask(src, bitfield) \
+		(((2 << (1 ? bitfield)) - 1) & ~((1 << (0 ? bitfield)) - 1))
+#define mask(bitfield) __mask(bitfield)
+
+#define __bitf(src, bitfield, x) \
+		(((x) >> (src) << (0 ? bitfield)) &  __mask(src, bitfield))
+#define bitf(bitfield, x) __bitf(bitfield, x)
+#define bitfs(bitfield, s) __bitf(bitfield, bitfield##_##s)
+#define setbitf(state, reg, bitfield, x)				\
+	state->regs[reg] = (state->regs[reg] & ~mask(reg##_##bitfield))	\
+		| bitf(reg##_##bitfield, x)
+
+#define __unbitf(src, bitfield, x) \
+		((x & __mask(src, bitfield)) >> (0 ? bitfield) << (src))
+#define unbitf(bitfield, x) __unbitf(bitfield, x)
+
+static inline int interpolate(int y0, int y1, int y2, int x)
+{
+	return y1 + (x < 50 ? y1 - y0 : y2 - y1) * (x - 50) / 50;
+}
+
+static inline int32_t round_fixed(fixed x)
+{
+	return (x + fixed1/2) >> 32;
+}
+
+#define ch7006_load_reg(client, state, reg) ch7006_write(client, reg, state->regs[reg])
+#define ch7006_save_reg(client, state, reg) state->regs[reg] = ch7006_read(client, reg)
+
+/* Fixed hardware specs */
+
+#define CH7006_FREQ0				14318
+#define CH7006_MAXN				650
+#define CH7006_MAXM				315
+
+/* Register definitions */
+
+#define CH7006_DISPMODE				0x00
+#define CH7006_DISPMODE_INPUT_RES		0, 7:5
+#define CH7006_DISPMODE_INPUT_RES_512x384	0x0
+#define CH7006_DISPMODE_INPUT_RES_720x400	0x1
+#define CH7006_DISPMODE_INPUT_RES_640x400	0x2
+#define CH7006_DISPMODE_INPUT_RES_640x480	0x3
+#define CH7006_DISPMODE_INPUT_RES_800x600	0x4
+#define CH7006_DISPMODE_INPUT_RES_NATIVE	0x5
+#define CH7006_DISPMODE_OUTPUT_STD		0, 4:3
+#define CH7006_DISPMODE_OUTPUT_STD_PAL		0x0
+#define CH7006_DISPMODE_OUTPUT_STD_NTSC		0x1
+#define CH7006_DISPMODE_OUTPUT_STD_PAL_M	0x2
+#define CH7006_DISPMODE_OUTPUT_STD_NTSC_J	0x3
+#define CH7006_DISPMODE_SCALING_RATIO		0, 2:0
+#define CH7006_DISPMODE_SCALING_RATIO_5_4	0x0
+#define CH7006_DISPMODE_SCALING_RATIO_1_1	0x1
+#define CH7006_DISPMODE_SCALING_RATIO_7_8	0x2
+#define CH7006_DISPMODE_SCALING_RATIO_5_6	0x3
+#define CH7006_DISPMODE_SCALING_RATIO_3_4	0x4
+#define CH7006_DISPMODE_SCALING_RATIO_7_10	0x5
+
+#define CH7006_FFILTER				0x01
+#define CH7006_FFILTER_TEXT			0, 5:4
+#define CH7006_FFILTER_LUMA			0, 3:2
+#define CH7006_FFILTER_CHROMA			0, 1:0
+#define CH7006_FFILTER_CHROMA_NO_DCRAWL		0x3
+
+#define CH7006_BWIDTH				0x03
+#define CH7006_BWIDTH_5L_FFILER			(1 << 7)
+#define CH7006_BWIDTH_CVBS_NO_CHROMA		(1 << 6)
+#define CH7006_BWIDTH_CHROMA			0, 5:4
+#define CH7006_BWIDTH_SVIDEO_YPEAK		(1 << 3)
+#define CH7006_BWIDTH_SVIDEO_LUMA		0, 2:1
+#define CH7006_BWIDTH_CVBS_LUMA			0, 0:0
+
+#define CH7006_INPUT_FORMAT			0x04
+#define CH7006_INPUT_FORMAT_DAC_GAIN		(1 << 6)
+#define CH7006_INPUT_FORMAT_RGB_PASS_THROUGH	(1 << 5)
+#define CH7006_INPUT_FORMAT_FORMAT		0, 3:0
+#define CH7006_INPUT_FORMAT_FORMAT_RGB16	0x0
+#define CH7006_INPUT_FORMAT_FORMAT_YCrCb24m16	0x1
+#define CH7006_INPUT_FORMAT_FORMAT_RGB24m16	0x2
+#define CH7006_INPUT_FORMAT_FORMAT_RGB15	0x3
+#define CH7006_INPUT_FORMAT_FORMAT_RGB24m12C	0x4
+#define CH7006_INPUT_FORMAT_FORMAT_RGB24m12I	0x5
+#define CH7006_INPUT_FORMAT_FORMAT_RGB24m8	0x6
+#define CH7006_INPUT_FORMAT_FORMAT_RGB16m8	0x7
+#define CH7006_INPUT_FORMAT_FORMAT_RGB15m8	0x8
+#define CH7006_INPUT_FORMAT_FORMAT_YCrCb24m8	0x9
+
+#define CH7006_CLKMODE				0x06
+#define CH7006_CLKMODE_SUBC_LOCK		(1 << 7)
+#define CH7006_CLKMODE_MASTER			(1 << 6)
+#define CH7006_CLKMODE_POS_EDGE			(1 << 4)
+#define CH7006_CLKMODE_XCM			0, 3:2
+#define CH7006_CLKMODE_PCM			0, 1:0
+
+#define CH7006_START_ACTIVE			0x07
+#define CH7006_START_ACTIVE_0			0, 7:0
+
+#define CH7006_POV				0x08
+#define CH7006_POV_START_ACTIVE_8		8, 2:2
+#define CH7006_POV_HPOS_8			8, 1:1
+#define CH7006_POV_VPOS_8			8, 0:0
+
+#define CH7006_BLACK_LEVEL			0x09
+#define CH7006_BLACK_LEVEL_0			0, 7:0
+
+#define CH7006_HPOS				0x0a
+#define CH7006_HPOS_0				0, 7:0
+
+#define CH7006_VPOS				0x0b
+#define CH7006_VPOS_0				0, 7:0
+
+#define CH7006_INPUT_SYNC			0x0d
+#define CH7006_INPUT_SYNC_EMBEDDED		(1 << 3)
+#define CH7006_INPUT_SYNC_OUTPUT		(1 << 2)
+#define CH7006_INPUT_SYNC_PVSYNC		(1 << 1)
+#define CH7006_INPUT_SYNC_PHSYNC		(1 << 0)
+
+#define CH7006_POWER				0x0e
+#define CH7006_POWER_SCART			(1 << 4)
+#define CH7006_POWER_RESET			(1 << 3)
+#define CH7006_POWER_LEVEL			0, 2:0
+#define CH7006_POWER_LEVEL_CVBS_OFF		0x0
+#define CH7006_POWER_LEVEL_POWER_OFF		0x1
+#define CH7006_POWER_LEVEL_SVIDEO_OFF		0x2
+#define CH7006_POWER_LEVEL_NORMAL		0x3
+#define CH7006_POWER_LEVEL_FULL_POWER_OFF	0x4
+
+#define CH7006_DETECT				0x10
+#define CH7006_DETECT_SVIDEO_Y_TEST		(1 << 3)
+#define CH7006_DETECT_SVIDEO_C_TEST		(1 << 2)
+#define CH7006_DETECT_CVBS_TEST			(1 << 1)
+#define CH7006_DETECT_SENSE			(1 << 0)
+
+#define CH7006_CONTRAST				0x11
+#define CH7006_CONTRAST_0			0, 2:0
+
+#define CH7006_PLLOV				0x13
+#define CH7006_PLLOV_N_8			8, 2:1
+#define CH7006_PLLOV_M_8			8, 0:0
+
+#define CH7006_PLLM				0x14
+#define CH7006_PLLM_0				0, 7:0
+
+#define CH7006_PLLN				0x15
+#define CH7006_PLLN_0				0, 7:0
+
+#define CH7006_BCLKOUT				0x17
+
+#define CH7006_SUBC_INC0			0x18
+#define CH7006_SUBC_INC0_28			28, 3:0
+
+#define CH7006_SUBC_INC1			0x19
+#define CH7006_SUBC_INC1_24			24, 3:0
+
+#define CH7006_SUBC_INC2			0x1a
+#define CH7006_SUBC_INC2_20			20, 3:0
+
+#define CH7006_SUBC_INC3			0x1b
+#define CH7006_SUBC_INC3_GPIO1_VAL		(1 << 7)
+#define CH7006_SUBC_INC3_GPIO0_VAL		(1 << 6)
+#define CH7006_SUBC_INC3_POUT_3_3V		(1 << 5)
+#define CH7006_SUBC_INC3_POUT_INV		(1 << 4)
+#define CH7006_SUBC_INC3_16			16, 3:0
+
+#define CH7006_SUBC_INC4			0x1c
+#define CH7006_SUBC_INC4_GPIO1_IN		(1 << 7)
+#define CH7006_SUBC_INC4_GPIO0_IN		(1 << 6)
+#define CH7006_SUBC_INC4_DS_INPUT		(1 << 4)
+#define CH7006_SUBC_INC4_12			12, 3:0
+
+#define CH7006_SUBC_INC5			0x1d
+#define CH7006_SUBC_INC5_8			8, 3:0
+
+#define CH7006_SUBC_INC6			0x1e
+#define CH7006_SUBC_INC6_4			4, 3:0
+
+#define CH7006_SUBC_INC7			0x1f
+#define CH7006_SUBC_INC7_0			0, 3:0
+
+#define CH7006_PLL_CONTROL			0x20
+#define CH7006_PLL_CONTROL_CPI			(1 << 5)
+#define CH7006_PLL_CONTROL_CAPACITOR		(1 << 4)
+#define CH7006_PLL_CONTROL_7STAGES		(1 << 3)
+#define CH7006_PLL_CONTROL_DIGITAL_5V		(1 << 2)
+#define CH7006_PLL_CONTROL_ANALOG_5V		(1 << 1)
+#define CH7006_PLL_CONTROL_MEMORY_5V		(1 << 0)
+
+#define CH7006_CALC_SUBC_INC0			0x21
+#define CH7006_CALC_SUBC_INC0_24		24, 4:3
+#define CH7006_CALC_SUBC_INC0_HYST		0, 2:1
+#define CH7006_CALC_SUBC_INC0_AUTO		(1 << 0)
+
+#define CH7006_CALC_SUBC_INC1			0x22
+#define CH7006_CALC_SUBC_INC1_16		16, 7:0
+
+#define CH7006_CALC_SUBC_INC2			0x23
+#define CH7006_CALC_SUBC_INC2_8			8, 7:0
+
+#define CH7006_CALC_SUBC_INC3			0x24
+#define CH7006_CALC_SUBC_INC3_0			0, 7:0
+
+#define CH7006_VERSION_ID			0x25
+
+#endif
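
A note on the register definitions above: each bitfield is encoded as a "shift, high:low" token pair, and the `1 ?`/`0 ?` ternaries in __mask() and __bitf() pick the bit numbers out of the high:low token — `1 ? 7:5` evaluates to 7 and `0 ? 7:5` to 5. The one-level mask()/bitf() wrappers exist so the two-token argument is macro-expanded before __mask()/__bitf() receive it as two parameters. A minimal standalone sketch of the expansion:

	#include <stdio.h>

	#define __mask(src, bitfield) \
		(((2 << (1 ? bitfield)) - 1) & ~((1 << (0 ? bitfield)) - 1))
	#define mask(bitfield) __mask(bitfield)

	#define CH7006_DISPMODE_INPUT_RES	0, 7:5

	int main(void)
	{
		/* 1 ? 7:5 == 7 and 0 ? 7:5 == 5, so this covers bits 7..5 */
		printf("mask = 0x%02x\n", mask(CH7006_DISPMODE_INPUT_RES));
		/* prints mask = 0xe0 */
		return 0;
	}

The `fixed` type, likewise, is plain 32.32 fixed point: fixed1 is 1.0, and round_fixed() rounds to nearest by adding half a unit before the 32-bit shift.
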
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index fa7b9be..9929f84 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -15,7 +15,6 @@
 	  intel_lvds.o \
 	  intel_bios.o \
 	  intel_dp.o \
-	  intel_dp_i2c.o \
 	  intel_hdmi.o \
 	  intel_sdvo.o \
 	  intel_modes.o \
@@ -23,6 +22,7 @@
 	  intel_fb.o \
 	  intel_tv.o \
 	  intel_dvo.o \
+	  intel_overlay.o \
 	  dvo_ch7xxx.o \
 	  dvo_ch7017.o \
 	  dvo_ivch.o \
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 621815b..1184c14 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -249,7 +249,8 @@
 	if (val != CH7017_DEVICE_ID_VALUE &&
 	    val != CH7018_DEVICE_ID_VALUE &&
 	    val != CH7019_DEVICE_ID_VALUE) {
-		DRM_DEBUG("ch701x not detected, got %d: from %s Slave %d.\n",
+		DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
+				"Slave %d.\n",
 			  val, i2cbus->adapter.name,dvo->slave_addr);
 		goto fail;
 	}
@@ -284,7 +285,7 @@
 	uint8_t horizontal_active_pixel_output, vertical_active_line_output;
 	uint8_t active_input_line_output;
 
-	DRM_DEBUG("Registers before mode setting\n");
+	DRM_DEBUG_KMS("Registers before mode setting\n");
 	ch7017_dump_regs(dvo);
 
 	/* LVDS PLL settings from page 75 of 7017-7017ds.pdf*/
@@ -346,7 +347,7 @@
 	/* Turn the LVDS back on with new settings. */
 	ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down);
 
-	DRM_DEBUG("Registers after mode setting\n");
+	DRM_DEBUG_KMS("Registers after mode setting\n");
 	ch7017_dump_regs(dvo);
 }
 
@@ -386,7 +387,7 @@
 #define DUMP(reg)					\
 do {							\
 	ch7017_read(dvo, reg, &val);			\
-	DRM_DEBUG(#reg ": %02x\n", val);		\
+	DRM_DEBUG_KMS(#reg ": %02x\n", val);		\
 } while (0)
 
 	DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT);
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index a9b8962..d56ff5c 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -152,7 +152,7 @@
 	};
 
 	if (!ch7xxx->quiet) {
-		DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 	return false;
@@ -179,7 +179,7 @@
 		return true;
 
 	if (!ch7xxx->quiet) {
-		DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 
@@ -207,7 +207,8 @@
 
 	name = ch7xxx_get_id(vendor);
 	if (!name) {
-		DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
+		DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
+				"slave %d.\n",
 			  vendor, adapter->name, dvo->slave_addr);
 		goto out;
 	}
@@ -217,13 +218,14 @@
 		goto out;
 
 	if (device != CH7xxx_DID) {
-		DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
+		DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
+				"slave %d.\n",
 			  vendor, adapter->name, dvo->slave_addr);
 		goto out;
 	}
 
 	ch7xxx->quiet = false;
-	DRM_DEBUG("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
+	DRM_DEBUG_KMS("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
 		  name, vendor, device);
 	return true;
 out:
@@ -315,8 +317,8 @@
 
 	for (i = 0; i < CH7xxx_NUM_REGS; i++) {
 		if ((i % 8) == 0 )
-			DRM_DEBUG("\n %02X: ", i);
-		DRM_DEBUG("%02X ", ch7xxx->mode_reg.regs[i]);
+			DRM_LOG_KMS("\n %02X: ", i);
+		DRM_LOG_KMS("%02X ", ch7xxx->mode_reg.regs[i]);
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index aa176f9..24169e5 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -202,7 +202,8 @@
 	};
 
 	if (!priv->quiet) {
-		DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+		DRM_DEBUG_KMS("Unable to read register 0x%02x from "
+				"%s:%02x.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 	return false;
@@ -230,7 +231,7 @@
 		return true;
 
 	if (!priv->quiet) {
-		DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 
@@ -261,7 +262,7 @@
 	 * the address it's responding on.
 	 */
 	if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) {
-		DRM_DEBUG("ivch detect failed due to address mismatch "
+		DRM_DEBUG_KMS("ivch detect failed due to address mismatch "
 			  "(%d vs %d)\n",
 			  (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr);
 		goto out;
@@ -367,41 +368,41 @@
 	uint16_t val;
 
 	ivch_read(dvo, VR00, &val);
-	DRM_DEBUG("VR00: 0x%04x\n", val);
+	DRM_LOG_KMS("VR00: 0x%04x\n", val);
 	ivch_read(dvo, VR01, &val);
-	DRM_DEBUG("VR01: 0x%04x\n", val);
+	DRM_LOG_KMS("VR01: 0x%04x\n", val);
 	ivch_read(dvo, VR30, &val);
-	DRM_DEBUG("VR30: 0x%04x\n", val);
+	DRM_LOG_KMS("VR30: 0x%04x\n", val);
 	ivch_read(dvo, VR40, &val);
-	DRM_DEBUG("VR40: 0x%04x\n", val);
+	DRM_LOG_KMS("VR40: 0x%04x\n", val);
 
 	/* GPIO registers */
 	ivch_read(dvo, VR80, &val);
-	DRM_DEBUG("VR80: 0x%04x\n", val);
+	DRM_LOG_KMS("VR80: 0x%04x\n", val);
 	ivch_read(dvo, VR81, &val);
-	DRM_DEBUG("VR81: 0x%04x\n", val);
+	DRM_LOG_KMS("VR81: 0x%04x\n", val);
 	ivch_read(dvo, VR82, &val);
-	DRM_DEBUG("VR82: 0x%04x\n", val);
+	DRM_LOG_KMS("VR82: 0x%04x\n", val);
 	ivch_read(dvo, VR83, &val);
-	DRM_DEBUG("VR83: 0x%04x\n", val);
+	DRM_LOG_KMS("VR83: 0x%04x\n", val);
 	ivch_read(dvo, VR84, &val);
-	DRM_DEBUG("VR84: 0x%04x\n", val);
+	DRM_LOG_KMS("VR84: 0x%04x\n", val);
 	ivch_read(dvo, VR85, &val);
-	DRM_DEBUG("VR85: 0x%04x\n", val);
+	DRM_LOG_KMS("VR85: 0x%04x\n", val);
 	ivch_read(dvo, VR86, &val);
-	DRM_DEBUG("VR86: 0x%04x\n", val);
+	DRM_LOG_KMS("VR86: 0x%04x\n", val);
 	ivch_read(dvo, VR87, &val);
-	DRM_DEBUG("VR87: 0x%04x\n", val);
+	DRM_LOG_KMS("VR87: 0x%04x\n", val);
 	ivch_read(dvo, VR88, &val);
-	DRM_DEBUG("VR88: 0x%04x\n", val);
+	DRM_LOG_KMS("VR88: 0x%04x\n", val);
 
 	/* Scratch register 0 - AIM Panel type */
 	ivch_read(dvo, VR8E, &val);
-	DRM_DEBUG("VR8E: 0x%04x\n", val);
+	DRM_LOG_KMS("VR8E: 0x%04x\n", val);
 
 	/* Scratch register 1 - Status register */
 	ivch_read(dvo, VR8F, &val);
-	DRM_DEBUG("VR8F: 0x%04x\n", val);
+	DRM_LOG_KMS("VR8F: 0x%04x\n", val);
 }
 
 static void ivch_save(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index e1c1f73..0001c13 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -105,7 +105,7 @@
 	};
 
 	if (!sil->quiet) {
-		DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 	return false;
@@ -131,7 +131,7 @@
 		return true;
 
 	if (!sil->quiet) {
-		DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 
@@ -158,7 +158,7 @@
 		goto out;
 
 	if (ch != (SIL164_VID & 0xff)) {
-		DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
+		DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
 			  ch, adapter->name, dvo->slave_addr);
 		goto out;
 	}
@@ -167,13 +167,13 @@
 		goto out;
 
 	if (ch != (SIL164_DID & 0xff)) {
-		DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
+		DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
 			  ch, adapter->name, dvo->slave_addr);
 		goto out;
 	}
 	sil->quiet = false;
 
-	DRM_DEBUG("init sil164 dvo controller successfully!\n");
+	DRM_DEBUG_KMS("init sil164 dvo controller successfully!\n");
 	return true;
 
 out:
@@ -241,15 +241,15 @@
 	uint8_t val;
 
 	sil164_readb(dvo, SIL164_FREQ_LO, &val);
-	DRM_DEBUG("SIL164_FREQ_LO: 0x%02x\n", val);
+	DRM_LOG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
 	sil164_readb(dvo, SIL164_FREQ_HI, &val);
-	DRM_DEBUG("SIL164_FREQ_HI: 0x%02x\n", val);
+	DRM_LOG_KMS("SIL164_FREQ_HI: 0x%02x\n", val);
 	sil164_readb(dvo, SIL164_REG8, &val);
-	DRM_DEBUG("SIL164_REG8: 0x%02x\n", val);
+	DRM_LOG_KMS("SIL164_REG8: 0x%02x\n", val);
 	sil164_readb(dvo, SIL164_REG9, &val);
-	DRM_DEBUG("SIL164_REG9: 0x%02x\n", val);
+	DRM_LOG_KMS("SIL164_REG9: 0x%02x\n", val);
 	sil164_readb(dvo, SIL164_REGC, &val);
-	DRM_DEBUG("SIL164_REGC: 0x%02x\n", val);
+	DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
 }
 
 static void sil164_save(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 9ecc907..c7c391b 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -130,7 +130,7 @@
 	};
 
 	if (!tfp->quiet) {
-		DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 	return false;
@@ -156,7 +156,7 @@
 		return true;
 
 	if (!tfp->quiet) {
-		DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 
@@ -191,13 +191,15 @@
 	tfp->quiet = true;
 
 	if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
-		DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n",
+		DRM_DEBUG_KMS("tfp410 not detected got VID %X: from %s "
+				"Slave %d.\n",
 			  id, adapter->name, dvo->slave_addr);
 		goto out;
 	}
 
 	if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
-		DRM_DEBUG("tfp410 not detected got DID %X: from %s Slave %d.\n",
+		DRM_DEBUG_KMS("tfp410 not detected got DID %X: from %s "
+				"Slave %d.\n",
 			  id, adapter->name, dvo->slave_addr);
 		goto out;
 	}
@@ -262,33 +264,33 @@
 	uint8_t val, val2;
 
 	tfp410_readb(dvo, TFP410_REV, &val);
-	DRM_DEBUG("TFP410_REV: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_REV: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_CTL_1, &val);
-	DRM_DEBUG("TFP410_CTL1: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_CTL1: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_CTL_2, &val);
-	DRM_DEBUG("TFP410_CTL2: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_CTL2: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_CTL_3, &val);
-	DRM_DEBUG("TFP410_CTL3: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_CTL3: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_USERCFG, &val);
-	DRM_DEBUG("TFP410_USERCFG: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_USERCFG: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_DE_DLY, &val);
-	DRM_DEBUG("TFP410_DE_DLY: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_DE_DLY: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_DE_CTL, &val);
-	DRM_DEBUG("TFP410_DE_CTL: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_DE_CTL: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_DE_TOP, &val);
-	DRM_DEBUG("TFP410_DE_TOP: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_DE_TOP: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_DE_CNT_LO, &val);
 	tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2);
-	DRM_DEBUG("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
+	DRM_LOG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
 	tfp410_readb(dvo, TFP410_DE_LIN_LO, &val);
 	tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2);
-	DRM_DEBUG("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
+	DRM_LOG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
 	tfp410_readb(dvo, TFP410_H_RES_LO, &val);
 	tfp410_readb(dvo, TFP410_H_RES_HI, &val2);
-	DRM_DEBUG("TFP410_H_RES: 0x%02X%02X\n", val2, val);
+	DRM_LOG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val);
 	tfp410_readb(dvo, TFP410_V_RES_LO, &val);
 	tfp410_readb(dvo, TFP410_V_RES_HI, &val2);
-	DRM_DEBUG("TFP410_V_RES: 0x%02X%02X\n", val2, val);
+	DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
 }
 
 static void tfp410_save(struct intel_dvo_device *dvo)
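
The DRM_DEBUG -> DRM_DEBUG_KMS/DRM_LOG_KMS conversion running through these dvo_*.c files moves the output onto the KMS category of the drm.debug module-parameter bitmask, so modesetting chatter can be enabled without turning on every core and driver message. An illustrative sketch of the gating — not the actual drm core definitions, and assuming the era's DRM_UT_KMS bit of 0x04:

	#include <stdio.h>

	static unsigned int drm_debug;	/* stands in for the drm.debug parameter */

	#define DRM_UT_KMS	0x04	/* KMS category bit (assumed 0x04 here) */

	#define MY_DEBUG_KMS(fmt, ...) do {				\
			if (drm_debug & DRM_UT_KMS)			\
				fprintf(stderr, "[kms] %s: " fmt,	\
					__func__, ##__VA_ARGS__);	\
		} while (0)

	int main(void)
	{
		drm_debug = DRM_UT_KMS;	/* e.g. booted with drm.debug=0x04 */
		MY_DEBUG_KMS("probing slave %d\n", 0x76);
		return 0;
	}
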
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 26bf055..18476bf 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -27,6 +27,7 @@
  */
 
 #include <linux/seq_file.h>
+#include <linux/debugfs.h>
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
@@ -96,13 +97,14 @@
 	{
 		struct drm_gem_object *obj = obj_priv->obj;
 
-		seq_printf(m, "    %p: %s %8zd %08x %08x %d %s",
+		seq_printf(m, "    %p: %s %8zd %08x %08x %d%s%s",
 			   obj,
 			   get_pin_flag(obj_priv),
 			   obj->size,
 			   obj->read_domains, obj->write_domain,
 			   obj_priv->last_rendering_seqno,
-			   obj_priv->dirty ? "dirty" : "");
+			   obj_priv->dirty ? " dirty" : "",
+			   obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 
 		if (obj->name)
 			seq_printf(m, " (name: %d)", obj->name);
@@ -160,7 +162,7 @@
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (!IS_IGDNG(dev)) {
+	if (!IS_IRONLAKE(dev)) {
 		seq_printf(m, "Interrupt enable:    %08x\n",
 			   I915_READ(IER));
 		seq_printf(m, "Interrupt identity:  %08x\n",
@@ -412,6 +414,109 @@
 	return 0;
 }
 
+static int
+i915_wedged_open(struct inode *inode,
+		 struct file *filp)
+{
+	filp->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t
+i915_wedged_read(struct file *filp,
+		 char __user *ubuf,
+		 size_t max,
+		 loff_t *ppos)
+{
+	struct drm_device *dev = filp->private_data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	char buf[80];
+	int len;
+
+	len = snprintf(buf, sizeof (buf),
+		       "wedged :  %d\n",
+		       atomic_read(&dev_priv->mm.wedged));
+
+	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_wedged_write(struct file *filp,
+		  const char __user *ubuf,
+		  size_t cnt,
+		  loff_t *ppos)
+{
+	struct drm_device *dev = filp->private_data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	char buf[20];
+	int val = 1;
+
+	if (cnt > 0) {
+		if (cnt > sizeof (buf) - 1)
+			return -EINVAL;
+
+		if (copy_from_user(buf, ubuf, cnt))
+			return -EFAULT;
+		buf[cnt] = 0;
+
+		val = simple_strtoul(buf, NULL, 0);
+	}
+
+	DRM_INFO("Manually setting wedged to %d\n", val);
+
+	atomic_set(&dev_priv->mm.wedged, val);
+	if (val) {
+		DRM_WAKEUP(&dev_priv->irq_queue);
+		queue_work(dev_priv->wq, &dev_priv->error_work);
+	}
+
+	return cnt;
+}
+
+static const struct file_operations i915_wedged_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_wedged_open,
+	.read = i915_wedged_read,
+	.write = i915_wedged_write,
+};
+
+/* As the drm_debugfs_init() routines are called before dev->dev_private is
+ * allocated, we need to hook into the minor for release. */
+static int
+drm_add_fake_info_node(struct drm_minor *minor,
+		       struct dentry *ent,
+		       const void *key)
+{
+	struct drm_info_node *node;
+
+	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
+	if (node == NULL) {
+		debugfs_remove(ent);
+		return -ENOMEM;
+	}
+
+	node->minor = minor;
+	node->dent = ent;
+	node->info_ent = (void *) key;
+	list_add(&node->list, &minor->debugfs_nodes.list);
+
+	return 0;
+}
+
+static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
+{
+	struct drm_device *dev = minor->dev;
+	struct dentry *ent;
+
+	ent = debugfs_create_file("i915_wedged",
+				  S_IRUGO | S_IWUSR,
+				  root, dev,
+				  &i915_wedged_fops);
+	if (IS_ERR(ent))
+		return PTR_ERR(ent);
+
+	return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
+}
 
 static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_regs", i915_registers_info, 0},
@@ -432,6 +537,12 @@
 
 int i915_debugfs_init(struct drm_minor *minor)
 {
+	int ret;
+
+	ret = i915_wedged_create(minor->debugfs_root, minor);
+	if (ret)
+		return ret;
+
 	return drm_debugfs_create_files(i915_debugfs_list,
 					I915_DEBUGFS_ENTRIES,
 					minor->debugfs_root, minor);
@@ -441,7 +552,8 @@
 {
 	drm_debugfs_remove_files(i915_debugfs_list,
 				 I915_DEBUGFS_ENTRIES, minor);
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
+				 1, minor);
 }
 
 #endif /* CONFIG_DEBUG_FS */
-
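
The new i915_wedged debugfs file gives userspace a handle on dev_priv->mm.wedged: reading reports the current state, and writing a nonzero value forces the wedged state, wakes waiters, and queues the error work — useful for exercising the hang-recovery path. A quick test sketch; the path is an assumption (debugfs mounted at /sys/kernel/debug, DRM minor 0), not part of the patch:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/kernel/debug/dri/0/i915_wedged", O_WRONLY);

		if (fd < 0) {
			perror("open i915_wedged");
			return 1;
		}
		if (write(fd, "1", 1) != 1)	/* parsed with simple_strtoul() */
			perror("write");
		close(fd);
		return 0;
	}
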
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e5b138b..701bfea 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -807,6 +807,12 @@
 	case I915_PARAM_NUM_FENCES_AVAIL:
 		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
 		break;
+	case I915_PARAM_HAS_OVERLAY:
+		value = dev_priv->overlay ? 1 : 0;
+		break;
+	case I915_PARAM_HAS_PAGEFLIPPING:
+		value = 1;
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 					param->param);
@@ -962,7 +968,7 @@
 	 * Some of the preallocated space is taken by the GTT
 	 * and popup.  GTT is 1K per MB of aperture size, and popup is 4K.
 	 */
-	if (IS_G4X(dev) || IS_IGD(dev) || IS_IGDNG(dev))
+	if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev))
 		overhead = 4096;
 	else
 		overhead = (*aperture_size / 1024) + 4096;
@@ -1048,7 +1054,7 @@
 	int gtt_offset, gtt_size;
 
 	if (IS_I965G(dev)) {
-		if (IS_G4X(dev) || IS_IGDNG(dev)) {
+		if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
 			gtt_offset = 2*1024*1024;
 			gtt_size = 2*1024*1024;
 		} else {
@@ -1070,7 +1076,7 @@
 
 	entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
 
-	DRM_DEBUG("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
+	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
 
 	/* Mask out these reserved bits on this hardware. */
 	if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
@@ -1096,7 +1102,7 @@
 	phys =(entry & PTE_ADDRESS_MASK) |
 		((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
 
-	DRM_DEBUG("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
+	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
 
 	return phys;
 }
@@ -1306,7 +1312,7 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 tmp;
 
-	if (!IS_IGD(dev))
+	if (!IS_PINEVIEW(dev))
 		return;
 
 	tmp = I915_READ(CLKCFG);
@@ -1413,7 +1419,7 @@
 	if (ret)
 		goto out_iomapfree;
 
-	dev_priv->wq = create_workqueue("i915");
+	dev_priv->wq = create_singlethread_workqueue("i915");
 	if (dev_priv->wq == NULL) {
 		DRM_ERROR("Failed to create our workqueue.\n");
 		ret = -ENOMEM;
@@ -1434,7 +1440,7 @@
 
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-	if (IS_G4X(dev) || IS_IGDNG(dev)) {
+	if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
 		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
 	}
@@ -1489,9 +1495,7 @@
 	}
 
 	/* Must be done after probing outputs */
-	/* FIXME: verify on IGDNG */
-	if (!IS_IGDNG(dev))
-		intel_opregion_init(dev, 0);
+	intel_opregion_init(dev, 0);
 
 	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
 		    (unsigned long) dev);
@@ -1525,6 +1529,15 @@
 	}
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		/*
+		 * free the memory allocated for the child device
+		 * config parsed from the VBT
+		 */
+		if (dev_priv->child_dev && dev_priv->child_dev_num) {
+			kfree(dev_priv->child_dev);
+			dev_priv->child_dev = NULL;
+			dev_priv->child_dev_num = 0;
+		}
 		drm_irq_uninstall(dev);
 		vga_client_register(dev->pdev, NULL, NULL, NULL);
 	}
@@ -1535,8 +1548,7 @@
 	if (dev_priv->regs != NULL)
 		iounmap(dev_priv->regs);
 
-	if (!IS_IGDNG(dev))
-		intel_opregion_free(dev, 0);
+	intel_opregion_free(dev, 0);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		intel_modeset_cleanup(dev);
@@ -1548,6 +1560,8 @@
 		mutex_unlock(&dev->struct_mutex);
 		drm_mm_takedown(&dev_priv->vram);
 		i915_gem_lastclose(dev);
+
+		intel_cleanup_overlay(dev);
 	}
 
 	pci_dev_put(dev_priv->bridge_dev);
@@ -1656,6 +1670,8 @@
 	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
 	DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
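
With the two new getparam cases, userspace can probe for overlay and pageflip support before relying on the matching ioctls. A probe sketch, assuming headers that carry this series' I915_PARAM_HAS_OVERLAY/I915_PARAM_HAS_PAGEFLIPPING definitions and the usual /dev/dri/card0 node:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static int i915_getparam(int fd, int param)
	{
		int value = 0;
		struct drm_i915_getparam gp = { .param = param, .value = &value };

		if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) < 0)
			return -1;
		return value;
	}

	int main(void)
	{
		int fd = open("/dev/dri/card0", O_RDWR);

		if (fd < 0)
			return 1;
		printf("overlay: %d, pageflipping: %d\n",
		       i915_getparam(fd, I915_PARAM_HAS_OVERLAY),
		       i915_getparam(fd, I915_PARAM_HAS_PAGEFLIPPING));
		close(fd);
		return 0;
	}
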
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 7f436ec..2fa2178 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -333,6 +333,7 @@
 		 .mmap = drm_gem_mmap,
 		 .poll = drm_poll,
 		 .fasync = drm_fasync,
+		 .read = drm_read,
 #ifdef CONFIG_COMPAT
 		 .compat_ioctl = i915_compat_ioctl,
 #endif
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a725f659..fbecac7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -170,6 +170,8 @@
 	/* clock gating init */
 };
 
+struct intel_overlay;
+
 typedef struct drm_i915_private {
 	struct drm_device *dev;
 
@@ -187,6 +189,7 @@
 	unsigned int status_gfx_addr;
 	drm_local_map_t hws_map;
 	struct drm_gem_object *hws_obj;
+	struct drm_gem_object *pwrctx;
 
 	struct resource mch_res;
 
@@ -206,11 +209,13 @@
 	/** Cached value of IMR to avoid reads in updating the bitfield */
 	u32 irq_mask_reg;
 	u32 pipestat[2];
-	/** splitted irq regs for graphics and display engine on IGDNG,
+	/** split irq regs for graphics and display engine on Ironlake,
 	    irq_mask_reg is still used for display irq. */
 	u32 gt_irq_mask_reg;
 	u32 gt_irq_enable_reg;
 	u32 de_irq_enable_reg;
+	u32 pch_irq_mask_reg;
+	u32 pch_irq_enable_reg;
 
 	u32 hotplug_supported_mask;
 	struct work_struct hotplug_work;
@@ -240,6 +245,9 @@
 
 	struct intel_opregion opregion;
 
+	/* overlay */
+	struct intel_overlay *overlay;
+
 	/* LVDS info */
 	int backlight_duty_cycle;  /* restore backlight to this value */
 	bool panel_wants_dither;
@@ -258,7 +266,7 @@
 
 	struct notifier_block lid_notifier;
 
-	int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */
+	int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */
 	struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
 	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
 	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -280,6 +288,7 @@
 	u32 saveDSPBCNTR;
 	u32 saveDSPARB;
 	u32 saveRENDERSTANDBY;
+	u32 savePWRCTXA;
 	u32 saveHWS;
 	u32 savePIPEACONF;
 	u32 savePIPEBCONF;
@@ -374,8 +383,6 @@
 	u32 saveFDI_RXA_IMR;
 	u32 saveFDI_RXB_IMR;
 	u32 saveCACHE_MODE_0;
-	u32 saveD_STATE;
-	u32 saveDSPCLK_GATE_D;
 	u32 saveMI_ARB_STATE;
 	u32 saveSWF0[16];
 	u32 saveSWF1[16];
@@ -539,13 +546,21 @@
 	/* indicate whether the LVDS_BORDER should be enabled or not */
 	unsigned int lvds_border_bits;
 
+	struct drm_crtc *plane_to_crtc_mapping[2];
+	struct drm_crtc *pipe_to_crtc_mapping[2];
+	wait_queue_head_t pending_flip_queue;
+
 	/* Reclocking support */
 	bool render_reclock_avail;
 	bool lvds_downclock_avail;
+	/* indicates the reduced downclock for LVDS*/
+	int lvds_downclock;
 	struct work_struct idle_work;
 	struct timer_list idle_timer;
 	bool busy;
 	u16 orig_clock;
+	int child_dev_num;
+	struct child_device_config *child_dev;
 } drm_i915_private_t;
 
 /** driver private structure attached to each drm_gem_object */
@@ -638,6 +653,13 @@
 	 * Advice: are the backing pages purgeable?
 	 */
 	int madv;
+
+	/**
+	 * Number of crtcs where this object is currently the fb, but
+	 * will be page flipped away on the next vblank.  When it
+	 * reaches 0, dev_priv->pending_flip_queue will be woken up.
+	 */
+	atomic_t pending_flip;
 };
 
 /**
@@ -738,6 +760,8 @@
 void
 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 
+void intel_enable_asle (struct drm_device *dev);
+
 
 /* i915_mem.c */
 extern int i915_mem_alloc(struct drm_device *dev, void *data,
@@ -813,6 +837,9 @@
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 		     unsigned long end);
 int i915_gem_idle(struct drm_device *dev);
+uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
+			  uint32_t flush_domains);
+int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
 				      int write);
@@ -824,6 +851,7 @@
 int i915_gem_object_get_pages(struct drm_gem_object *obj);
 void i915_gem_object_put_pages(struct drm_gem_object *obj);
 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
+void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
 
 void i915_gem_shrinker_init(void);
 void i915_gem_shrinker_exit(void);
@@ -863,11 +891,13 @@
 extern int intel_opregion_init(struct drm_device *dev, int resume);
 extern void intel_opregion_free(struct drm_device *dev, int suspend);
 extern void opregion_asle_intr(struct drm_device *dev);
+extern void ironlake_opregion_gse_intr(struct drm_device *dev);
 extern void opregion_enable_asle(struct drm_device *dev);
 #else
 static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; }
 static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; }
 static inline void opregion_asle_intr(struct drm_device *dev) { return; }
+static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; }
 static inline void opregion_enable_asle(struct drm_device *dev) { return; }
 #endif
 
@@ -955,8 +985,8 @@
 #define IS_I830(dev) ((dev)->pci_device == 0x3577)
 #define IS_845G(dev) ((dev)->pci_device == 0x2562)
 #define IS_I85X(dev) ((dev)->pci_device == 0x3582)
-#define IS_I855(dev) ((dev)->pci_device == 0x3582)
 #define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+#define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev))
 
 #define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
 #define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
@@ -990,47 +1020,51 @@
 		     (dev)->pci_device == 0x2E42 || \
 		     IS_GM45(dev))
 
-#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
-#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
-#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
+#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev) (IS_PINEVIEW_G(dev) || IS_PINEVIEW_M(dev))
 
 #define IS_G33(dev)    ((dev)->pci_device == 0x29C2 ||	\
 			(dev)->pci_device == 0x29B2 ||	\
 			(dev)->pci_device == 0x29D2 ||  \
-			(IS_IGD(dev)))
+			(IS_PINEVIEW(dev)))
 
-#define IS_IGDNG_D(dev)	((dev)->pci_device == 0x0042)
-#define IS_IGDNG_M(dev)	((dev)->pci_device == 0x0046)
-#define IS_IGDNG(dev)	(IS_IGDNG_D(dev) || IS_IGDNG_M(dev))
+#define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
+#define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
+#define IS_IRONLAKE(dev)	(IS_IRONLAKE_D(dev) || IS_IRONLAKE_M(dev))
 
 #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
 		      IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
-		      IS_IGDNG(dev))
+		      IS_IRONLAKE(dev))
 
 #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
 			IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
-			IS_IGD(dev) || IS_IGDNG_M(dev))
+			IS_PINEVIEW(dev) || IS_IRONLAKE_M(dev))
 
 #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \
-				IS_IGDNG(dev))
+				IS_IRONLAKE(dev))
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
  */
 #define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
 						      IS_I915GM(dev)))
-#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_IGDNG(dev))
-#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_IGDNG(dev))
-#define SUPPORTS_EDP(dev)		(IS_IGDNG_M(dev))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(IS_I9XX(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_IRONLAKE(dev))
+#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_IRONLAKE(dev))
+#define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
+#define SUPPORTS_TV(dev)		(IS_I9XX(dev) && IS_MOBILE(dev) && \
+					!IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
 #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev))
 /* dsparb controlled by hw only */
-#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
 
-#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev))
-#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
+#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
 #define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \
 			   (IS_I9XX(dev) || IS_GM45(dev)) && \
-			   !IS_IGD(dev) && \
-			   !IS_IGDNG(dev))
+			   !IS_PINEVIEW(dev) && \
+			   !IS_IRONLAKE(dev))
+#define I915_HAS_RC6(dev) (IS_I965GM(dev) || IS_GM45(dev) || IS_IRONLAKE_M(dev))
 
 #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a2a3fa5..8c463cf 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1288,6 +1288,7 @@
 	list->hash.key = list->file_offset_node->start;
 	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
 		DRM_ERROR("failed to add to map hash\n");
+		ret = -ENOMEM;
 		goto out_free_mm;
 	}
 
@@ -1583,7 +1584,7 @@
  *
  * Returned sequence numbers are nonzero on success.
  */
-static uint32_t
+uint32_t
 i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 		 uint32_t flush_domains)
 {
@@ -1617,7 +1618,7 @@
 	OUT_RING(MI_USER_INTERRUPT);
 	ADVANCE_LP_RING();
 
-	DRM_DEBUG("%d\n", seqno);
+	DRM_DEBUG_DRIVER("%d\n", seqno);
 
 	request->seqno = seqno;
 	request->emitted_jiffies = jiffies;
@@ -1820,12 +1821,8 @@
 	mutex_unlock(&dev->struct_mutex);
 }
 
-/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno)
+int
+i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 ier;
@@ -1837,7 +1834,7 @@
 		return -EIO;
 
 	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
-		if (IS_IGDNG(dev))
+		if (IS_IRONLAKE(dev))
 			ier = I915_READ(DEIER) | I915_READ(GTIER);
 		else
 			ier = I915_READ(IER);
@@ -1852,10 +1849,15 @@
 
 		dev_priv->mm.waiting_gem_seqno = seqno;
 		i915_user_irq_get(dev);
-		ret = wait_event_interruptible(dev_priv->irq_queue,
-					       i915_seqno_passed(i915_get_gem_seqno(dev),
-								 seqno) ||
-					       atomic_read(&dev_priv->mm.wedged));
+		if (interruptible)
+			ret = wait_event_interruptible(dev_priv->irq_queue,
+				i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
+				atomic_read(&dev_priv->mm.wedged));
+		else
+			wait_event(dev_priv->irq_queue,
+				i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
+				atomic_read(&dev_priv->mm.wedged));
+
 		i915_user_irq_put(dev);
 		dev_priv->mm.waiting_gem_seqno = 0;
 
@@ -1879,6 +1881,16 @@
 	return ret;
 }
 
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+static int
+i915_wait_request(struct drm_device *dev, uint32_t seqno)
+{
+	return i915_do_wait_request(dev, seqno, 1);
+}
+
 static void
 i915_gem_flush(struct drm_device *dev,
 	       uint32_t invalidate_domains,
@@ -1947,7 +1959,7 @@
 #endif
 		BEGIN_LP_RING(2);
 		OUT_RING(cmd);
-		OUT_RING(0); /* noop */
+		OUT_RING(MI_NOOP);
 		ADVANCE_LP_RING();
 	}
 }
@@ -2760,6 +2772,22 @@
 					    old_write_domain);
 }
 
+void
+i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
+{
+	switch (obj->write_domain) {
+	case I915_GEM_DOMAIN_GTT:
+		i915_gem_object_flush_gtt_write_domain(obj);
+		break;
+	case I915_GEM_DOMAIN_CPU:
+		i915_gem_object_flush_cpu_write_domain(obj);
+		break;
+	default:
+		i915_gem_object_flush_gpu_write_domain(obj);
+		break;
+	}
+}
+
 /**
  * Moves a single object to the GTT read, and possibly write domain.
  *
@@ -3525,6 +3553,41 @@
 	return 0;
 }
 
+static int
+i915_gem_wait_for_pending_flip(struct drm_device *dev,
+			       struct drm_gem_object **object_list,
+			       int count)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv;
+	DEFINE_WAIT(wait);
+	int i, ret = 0;
+
+	for (;;) {
+		prepare_to_wait(&dev_priv->pending_flip_queue,
+				&wait, TASK_INTERRUPTIBLE);
+		for (i = 0; i < count; i++) {
+			obj_priv = object_list[i]->driver_private;
+			if (atomic_read(&obj_priv->pending_flip) > 0)
+				break;
+		}
+		if (i == count)
+			break;
+
+		if (!signal_pending(current)) {
+			mutex_unlock(&dev->struct_mutex);
+			schedule();
+			mutex_lock(&dev->struct_mutex);
+			continue;
+		}
+		ret = -ERESTARTSYS;
+		break;
+	}
+	finish_wait(&dev_priv->pending_flip_queue, &wait);
+
+	return ret;
+}
+
 int
 i915_gem_execbuffer(struct drm_device *dev, void *data,
 		    struct drm_file *file_priv)
@@ -3540,7 +3603,7 @@
 	int ret, ret2, i, pinned = 0;
 	uint64_t exec_offset;
 	uint32_t seqno, flush_domains, reloc_index;
-	int pin_tries;
+	int pin_tries, flips;
 
 #if WATCH_EXEC
 	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@@ -3552,8 +3615,8 @@
 		return -EINVAL;
 	}
 	/* Copy in the exec list from userland */
-	exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
-	object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
+	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+	object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
 	if (exec_list == NULL || object_list == NULL) {
 		DRM_ERROR("Failed to allocate exec or object list "
 			  "for %d buffers\n",
@@ -3598,20 +3661,19 @@
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
 	if (atomic_read(&dev_priv->mm.wedged)) {
-		DRM_ERROR("Execbuf while wedged\n");
 		mutex_unlock(&dev->struct_mutex);
 		ret = -EIO;
 		goto pre_mutex_err;
 	}
 
 	if (dev_priv->mm.suspended) {
-		DRM_ERROR("Execbuf while VT-switched.\n");
 		mutex_unlock(&dev->struct_mutex);
 		ret = -EBUSY;
 		goto pre_mutex_err;
 	}
 
 	/* Look up object handles */
+	flips = 0;
 	for (i = 0; i < args->buffer_count; i++) {
 		object_list[i] = drm_gem_object_lookup(dev, file_priv,
 						       exec_list[i].handle);
@@ -3630,6 +3692,14 @@
 			goto err;
 		}
 		obj_priv->in_execbuffer = true;
+		flips += atomic_read(&obj_priv->pending_flip);
+	}
+
+	if (flips > 0) {
+		ret = i915_gem_wait_for_pending_flip(dev, object_list,
+						     args->buffer_count);
+		if (ret)
+			goto err;
 	}
 
 	/* Pin and relocate */
@@ -4356,7 +4426,7 @@
 	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
 	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
 	I915_READ(HWS_PGA); /* posting read */
-	DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
+	DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
 
 	return 0;
 }
@@ -4614,8 +4684,8 @@
 			for (i = 0; i < 8; i++)
 				I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
 	}
-
 	i915_gem_detect_bit_6_swizzle(dev);
+	init_waitqueue_head(&dev_priv->pending_flip_queue);
 }
 
 /*
@@ -4790,7 +4860,7 @@
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
 
-	DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
+	DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
 	ret = copy_from_user(obj_addr, user_data, args->size);
 	if (ret)
 		return -EFAULT;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 200e398..30d6af6 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -121,7 +121,7 @@
 				     0,   pcibios_align_resource,
 				     dev_priv->bridge_dev);
 	if (ret) {
-		DRM_DEBUG("failed bus alloc: %d\n", ret);
+		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
 		dev_priv->mch_res.start = 0;
 		goto out;
 	}
@@ -209,8 +209,8 @@
 	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 	bool need_disable;
 
-	if (IS_IGDNG(dev)) {
-		/* On IGDNG whatever DRAM config, GPU always do
+	if (IS_IRONLAKE(dev)) {
+		/* On Ironlake, whatever the DRAM config, the GPU always does the
 		 * same swizzling setup.
 		 */
 		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index aa7fd82..85f4c5d 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -43,10 +43,13 @@
  * we leave them always unmasked in IMR and then control enabling them through
  * PIPESTAT alone.
  */
-#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT |		 \
-				   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
-				   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
-				   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+#define I915_INTERRUPT_ENABLE_FIX			\
+	(I915_ASLE_INTERRUPT |				\
+	 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |		\
+	 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |		\
+	 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |	\
+	 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |	\
+	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
 
 /** Interrupts that we mask and unmask at runtime. */
 #define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
@@ -61,7 +64,7 @@
 					 DRM_I915_VBLANK_PIPE_B)
 
 void
-igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
 	if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
 		dev_priv->gt_irq_mask_reg &= ~mask;
@@ -71,7 +74,7 @@
 }
 
 static inline void
-igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
 	if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
 		dev_priv->gt_irq_mask_reg |= mask;
@@ -82,7 +85,7 @@
 
 /* For display hotplug interrupt */
 void
-igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
 	if ((dev_priv->irq_mask_reg & mask) != 0) {
 		dev_priv->irq_mask_reg &= ~mask;
@@ -92,7 +95,7 @@
 }
 
 static inline void
-igdng_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
 	if ((dev_priv->irq_mask_reg & mask) != mask) {
 		dev_priv->irq_mask_reg |= mask;
@@ -157,6 +160,20 @@
 }
 
 /**
+ * intel_enable_asle - enable ASLE interrupt for OpRegion
+ */
+void intel_enable_asle (struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	if (IS_IRONLAKE(dev))
+		ironlake_enable_display_irq(dev_priv, DE_GSE);
+	else
+		i915_enable_pipestat(dev_priv, 1,
+				     I915_LEGACY_BLC_EVENT_ENABLE);
+}
+
+/**
  * i915_pipe_enabled - check if a pipe is enabled
  * @dev: DRM device
  * @pipe: pipe to check
@@ -191,7 +208,8 @@
 	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
 
 	if (!i915_pipe_enabled(dev, pipe)) {
-		DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
+		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
+				"pipe %d\n", pipe);
 		return 0;
 	}
 
@@ -220,7 +238,8 @@
 	int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
 
 	if (!i915_pipe_enabled(dev, pipe)) {
-		DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
+		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
+					"pipe %d\n", pipe);
 		return 0;
 	}
 
@@ -250,12 +269,12 @@
 	drm_sysfs_hotplug_event(dev);
 }
 
-irqreturn_t igdng_irq_handler(struct drm_device *dev)
+irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int ret = IRQ_NONE;
-	u32 de_iir, gt_iir, de_ier;
-	u32 new_de_iir, new_gt_iir;
+	u32 de_iir, gt_iir, de_ier, pch_iir;
+	u32 new_de_iir, new_gt_iir, new_pch_iir;
 	struct drm_i915_master_private *master_priv;
 
 	/* disable master interrupt before clearing iir  */
@@ -265,13 +284,18 @@
 
 	de_iir = I915_READ(DEIIR);
 	gt_iir = I915_READ(GTIIR);
+	pch_iir = I915_READ(SDEIIR);
 
 	for (;;) {
-		if (de_iir == 0 && gt_iir == 0)
+		if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
 			break;
 
 		ret = IRQ_HANDLED;
 
+		/* clear the PCH hotplug event before clearing the CPU irq */
+		I915_WRITE(SDEIIR, pch_iir);
+		new_pch_iir = I915_READ(SDEIIR);
+
 		I915_WRITE(DEIIR, de_iir);
 		new_de_iir = I915_READ(DEIIR);
 		I915_WRITE(GTIIR, gt_iir);
@@ -291,8 +315,18 @@
 			DRM_WAKEUP(&dev_priv->irq_queue);
 		}
 
+		if (de_iir & DE_GSE)
+			ironlake_opregion_gse_intr(dev);
+
+		/* check event from PCH */
+		if ((de_iir & DE_PCH_EVENT) &&
+			(pch_iir & SDE_HOTPLUG_MASK)) {
+			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+		}
+
 		de_iir = new_de_iir;
 		gt_iir = new_gt_iir;
+		pch_iir = new_pch_iir;
 	}
 
 	I915_WRITE(DEIER, de_ier);
@@ -317,19 +351,19 @@
 	char *reset_event[] = { "RESET=1", NULL };
 	char *reset_done_event[] = { "ERROR=0", NULL };
 
-	DRM_DEBUG("generating error event\n");
+	DRM_DEBUG_DRIVER("generating error event\n");
 	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
 
 	if (atomic_read(&dev_priv->mm.wedged)) {
 		if (IS_I965G(dev)) {
-			DRM_DEBUG("resetting chip\n");
+			DRM_DEBUG_DRIVER("resetting chip\n");
 			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
 			if (!i965_reset(dev, GDRST_RENDER)) {
 				atomic_set(&dev_priv->mm.wedged, 0);
 				kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
 			}
 		} else {
-			printk("reboot required\n");
+			DRM_DEBUG_DRIVER("reboot required\n");
 		}
 	}
 }
@@ -355,7 +389,7 @@
 
 	error = kmalloc(sizeof(*error), GFP_ATOMIC);
 	if (!error) {
-		DRM_DEBUG("out ot memory, not capturing error state\n");
+		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
 		goto out;
 	}
 
@@ -512,7 +546,6 @@
 		/*
 		 * Wakeup waiting processes so they don't hang
 		 */
-		printk("i915: Waking up sleeping processes\n");
 		DRM_WAKEUP(&dev_priv->irq_queue);
 	}
 
@@ -535,8 +568,8 @@
 
 	atomic_inc(&dev_priv->irq_received);
 
-	if (IS_IGDNG(dev))
-		return igdng_irq_handler(dev);
+	if (IS_IRONLAKE(dev))
+		return ironlake_irq_handler(dev);
 
 	iir = I915_READ(IIR);
 
@@ -568,14 +601,14 @@
 		 */
 		if (pipea_stats & 0x8000ffff) {
 			if (pipea_stats &  PIPE_FIFO_UNDERRUN_STATUS)
-				DRM_DEBUG("pipe a underrun\n");
+				DRM_DEBUG_DRIVER("pipe a underrun\n");
 			I915_WRITE(PIPEASTAT, pipea_stats);
 			irq_received = 1;
 		}
 
 		if (pipeb_stats & 0x8000ffff) {
 			if (pipeb_stats &  PIPE_FIFO_UNDERRUN_STATUS)
-				DRM_DEBUG("pipe b underrun\n");
+				DRM_DEBUG_DRIVER("pipe b underrun\n");
 			I915_WRITE(PIPEBSTAT, pipeb_stats);
 			irq_received = 1;
 		}
@@ -591,7 +624,7 @@
 		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
 			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
 
-			DRM_DEBUG("hotplug event received, stat 0x%08x\n",
+			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
 				  hotplug_status);
 			if (hotplug_status & dev_priv->hotplug_supported_mask)
 				queue_work(dev_priv->wq,
@@ -599,27 +632,6 @@
 
 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
 			I915_READ(PORT_HOTPLUG_STAT);
-
-			/* EOS interrupts occurs */
-			if (IS_IGD(dev) &&
-				(hotplug_status & CRT_EOS_INT_STATUS)) {
-				u32 temp;
-
-				DRM_DEBUG("EOS interrupt occurs\n");
-				/* status is already cleared */
-				temp = I915_READ(ADPA);
-				temp &= ~ADPA_DAC_ENABLE;
-				I915_WRITE(ADPA, temp);
-
-				temp = I915_READ(PORT_HOTPLUG_EN);
-				temp &= ~CRT_EOS_INT_EN;
-				I915_WRITE(PORT_HOTPLUG_EN, temp);
-
-				temp = I915_READ(PORT_HOTPLUG_STAT);
-				if (temp & CRT_EOS_INT_STATUS)
-					I915_WRITE(PORT_HOTPLUG_STAT,
-						CRT_EOS_INT_STATUS);
-			}
 		}
 
 		I915_WRITE(IIR, iir);
@@ -641,14 +653,22 @@
 			mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
 		}
 
+		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
+			intel_prepare_page_flip(dev, 0);
+
+		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
+			intel_prepare_page_flip(dev, 1);
+
 		if (pipea_stats & vblank_status) {
 			vblank++;
 			drm_handle_vblank(dev, 0);
+			intel_finish_page_flip(dev, 0);
 		}
 
 		if (pipeb_stats & vblank_status) {
 			vblank++;
 			drm_handle_vblank(dev, 1);
+			intel_finish_page_flip(dev, 1);
 		}
 
 		if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
@@ -684,7 +704,7 @@
 
 	i915_kernel_lost_context(dev);
 
-	DRM_DEBUG("\n");
+	DRM_DEBUG_DRIVER("\n");
 
 	dev_priv->counter++;
 	if (dev_priv->counter > 0x7FFFFFFFUL)
@@ -709,8 +729,8 @@
 
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
 	if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
-		if (IS_IGDNG(dev))
-			igdng_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+		if (IS_IRONLAKE(dev))
+			ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
 		else
 			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
 	}
@@ -725,8 +745,8 @@
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
 	BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
 	if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
-		if (IS_IGDNG(dev))
-			igdng_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+		if (IS_IRONLAKE(dev))
+			ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
 		else
 			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
 	}
@@ -749,7 +769,7 @@
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 	int ret = 0;
 
-	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
+	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
 		  READ_BREADCRUMB(dev_priv));
 
 	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
@@ -832,7 +852,7 @@
 	if (!(pipeconf & PIPEACONF_ENABLE))
 		return -EINVAL;
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		return 0;
 
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
@@ -854,7 +874,7 @@
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		return;
 
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
@@ -868,7 +888,7 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!IS_IGDNG(dev))
+	if (!IS_IRONLAKE(dev))
 		opregion_enable_asle(dev);
 	dev_priv->irq_enabled = 1;
 }
@@ -976,7 +996,7 @@
 
 /* drm_dma.h hooks
 */
-static void igdng_irq_preinstall(struct drm_device *dev)
+static void ironlake_irq_preinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
@@ -992,14 +1012,21 @@
 	I915_WRITE(GTIMR, 0xffffffff);
 	I915_WRITE(GTIER, 0x0);
 	(void) I915_READ(GTIER);
+
+	/* south display irq */
+	I915_WRITE(SDEIMR, 0xffffffff);
+	I915_WRITE(SDEIER, 0x0);
+	(void) I915_READ(SDEIER);
 }
 
-static int igdng_irq_postinstall(struct drm_device *dev)
+static int ironlake_irq_postinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	/* enable kind of interrupts always enabled */
-	u32 display_mask = DE_MASTER_IRQ_CONTROL /*| DE_PCH_EVENT */;
+	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT;
 	u32 render_mask = GT_USER_INTERRUPT;
+	u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
+			   SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
 
 	dev_priv->irq_mask_reg = ~display_mask;
 	dev_priv->de_irq_enable_reg = display_mask;
@@ -1019,6 +1046,14 @@
 	I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
 	(void) I915_READ(GTIER);
 
+	dev_priv->pch_irq_mask_reg = ~hotplug_mask;
+	dev_priv->pch_irq_enable_reg = hotplug_mask;
+
+	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg);
+	I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
+	(void) I915_READ(SDEIER);
+
 	return 0;
 }
 
@@ -1031,8 +1066,8 @@
 	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
 	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
 
-	if (IS_IGDNG(dev)) {
-		igdng_irq_preinstall(dev);
+	if (IS_IRONLAKE(dev)) {
+		ironlake_irq_preinstall(dev);
 		return;
 	}
 
@@ -1059,8 +1094,8 @@
 
 	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
 
-	if (IS_IGDNG(dev))
-		return igdng_irq_postinstall(dev);
+	if (IS_IRONLAKE(dev))
+		return ironlake_irq_postinstall(dev);
 
 	/* Unmask the interrupts that we always want on. */
 	dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
@@ -1120,7 +1155,7 @@
 	return 0;
 }
 
-static void igdng_irq_uninstall(struct drm_device *dev)
+static void ironlake_irq_uninstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	I915_WRITE(HWSTAM, 0xffffffff);
@@ -1143,8 +1178,8 @@
 
 	dev_priv->vblank_pipe = 0;
 
-	if (IS_IGDNG(dev)) {
-		igdng_irq_uninstall(dev);
+	if (IS_IRONLAKE(dev)) {
+		ironlake_irq_uninstall(dev);
 		return;
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index 2d51935..7cc8410 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -118,6 +118,10 @@
 #define ASLE_BACKLIGHT_FAIL    (2<<12)
 #define ASLE_PFIT_FAIL         (2<<14)
 #define ASLE_PWM_FREQ_FAIL     (2<<16)
+#define ASLE_ALS_ILLUM_FAILED	(1<<10)
+#define ASLE_BACKLIGHT_FAILED	(1<<12)
+#define ASLE_PFIT_FAILED	(1<<14)
+#define ASLE_PWM_FREQ_FAILED	(1<<16)
 
 /* ASLE backlight brightness to set */
 #define ASLE_BCLP_VALID                (1<<31)
@@ -163,7 +167,7 @@
 	if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE))
 		pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
 	else {
-		if (IS_IGD(dev)) {
+		if (IS_PINEVIEW(dev)) {
 			blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
 			max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> 
 					BACKLIGHT_MODULATION_FREQ_SHIFT;
@@ -224,7 +228,7 @@
 	asle_req = asle->aslc & ASLE_REQ_MSK;
 
 	if (!asle_req) {
-		DRM_DEBUG("non asle set request??\n");
+		DRM_DEBUG_DRIVER("non asle set request??\n");
 		return;
 	}
 
@@ -243,6 +247,73 @@
 	asle->aslc = asle_stat;
 }
 
+static u32 asle_set_backlight_ironlake(struct drm_device *dev, u32 bclp)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct opregion_asle *asle = dev_priv->opregion.asle;
+	u32 cpu_pwm_ctl, pch_pwm_ctl2;
+	u32 max_backlight, level;
+
+	if (!(bclp & ASLE_BCLP_VALID))
+		return ASLE_BACKLIGHT_FAILED;
+
+	bclp &= ASLE_BCLP_MSK;
+	if (bclp > 255) /* bclp is unsigned; a < 0 check would be dead code */
+		return ASLE_BACKLIGHT_FAILED;
+
+	cpu_pwm_ctl = I915_READ(BLC_PWM_CPU_CTL);
+	pch_pwm_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+	/* get the max PWM duty cycle from the PCH control register */
+	max_backlight = (pch_pwm_ctl2 >> 16) & BACKLIGHT_DUTY_CYCLE_MASK;
+	/* calculate the expected PWM duty cycle */
+	level = (bclp * max_backlight) / 255;
+	/* preserve the high 16 bits */
+	cpu_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK);
+	/* write the updated PWM duty cycle */
+	I915_WRITE(BLC_PWM_CPU_CTL, cpu_pwm_ctl | level);
+
+	asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
+
+	return 0;
+}
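
The brightness math above reduces to scaling the 8-bit ASLE request into the hardware's maximum duty cycle; a self-contained sketch of the same arithmetic (names are illustrative, not driver API):

#include <stdint.h>
#include <stdio.h>

/* Same scaling as asle_set_backlight_ironlake(): bclp is 0..255,
 * max_backlight is the PWM period read from BLC_PWM_PCH_CTL2. */
static uint32_t asle_backlight_level(unsigned bclp, uint32_t max_backlight)
{
	return ((uint32_t)bclp * max_backlight) / 255;
}

int main(void)
{
	uint32_t max_backlight = 0x1000;	/* example period value */
	for (unsigned bclp = 0; bclp <= 255; bclp += 85)
		printf("bclp=%3u -> level=0x%04x\n", bclp,
		       (unsigned)asle_backlight_level(bclp, max_backlight));
	return 0;
}
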
+
+void ironlake_opregion_gse_intr(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct opregion_asle *asle = dev_priv->opregion.asle;
+	u32 asle_stat = 0;
+	u32 asle_req;
+
+	if (!asle)
+		return;
+
+	asle_req = asle->aslc & ASLE_REQ_MSK;
+
+	if (!asle_req) {
+		DRM_DEBUG_DRIVER("non asle set request??\n");
+		return;
+	}
+
+	if (asle_req & ASLE_SET_ALS_ILLUM) {
+		DRM_DEBUG_DRIVER("Illum is not supported\n");
+		asle_stat |= ASLE_ALS_ILLUM_FAILED;
+	}
+
+	if (asle_req & ASLE_SET_BACKLIGHT)
+		asle_stat |= asle_set_backlight_ironlake(dev, asle->bclp);
+
+	if (asle_req & ASLE_SET_PFIT) {
+		DRM_DEBUG_DRIVER("Pfit is not supported\n");
+		asle_stat |= ASLE_PFIT_FAILED;
+	}
+
+	if (asle_req & ASLE_SET_PWM_FREQ) {
+		DRM_DEBUG_DRIVER("PWM freq is not supported\n");
+		asle_stat |= ASLE_PWM_FREQ_FAILED;
+	}
+
+	asle->aslc = asle_stat;
+}
 #define ASLE_ALS_EN    (1<<0)
 #define ASLE_BLC_EN    (1<<1)
 #define ASLE_PFIT_EN   (1<<2)
@@ -258,8 +329,7 @@
 			unsigned long irqflags;
 
 			spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-			i915_enable_pipestat(dev_priv, 1,
-					     I915_LEGACY_BLC_EVENT_ENABLE);
+			intel_enable_asle(dev);
 			spin_unlock_irqrestore(&dev_priv->user_irq_lock,
 					       irqflags);
 		}
@@ -361,9 +431,9 @@
 	int err = 0;
 
 	pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
-	DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls);
+	DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
 	if (asls == 0) {
-		DRM_DEBUG("ACPI OpRegion not supported!\n");
+		DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
 		return -ENOTSUPP;
 	}
 
@@ -373,30 +443,30 @@
 
 	opregion->header = base;
 	if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
-		DRM_DEBUG("opregion signature mismatch\n");
+		DRM_DEBUG_DRIVER("opregion signature mismatch\n");
 		err = -EINVAL;
 		goto err_out;
 	}
 
 	mboxes = opregion->header->mboxes;
 	if (mboxes & MBOX_ACPI) {
-		DRM_DEBUG("Public ACPI methods supported\n");
+		DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
 		opregion->acpi = base + OPREGION_ACPI_OFFSET;
 		if (drm_core_check_feature(dev, DRIVER_MODESET))
 			intel_didl_outputs(dev);
 	} else {
-		DRM_DEBUG("Public ACPI methods not supported\n");
+		DRM_DEBUG_DRIVER("Public ACPI methods not supported\n");
 		err = -ENOTSUPP;
 		goto err_out;
 	}
 	opregion->enabled = 1;
 
 	if (mboxes & MBOX_SWSCI) {
-		DRM_DEBUG("SWSCI supported\n");
+		DRM_DEBUG_DRIVER("SWSCI supported\n");
 		opregion->swsci = base + OPREGION_SWSCI_OFFSET;
 	}
 	if (mboxes & MBOX_ASLE) {
-		DRM_DEBUG("ASLE supported\n");
+		DRM_DEBUG_DRIVER("ASLE supported\n");
 		opregion->asle = base + OPREGION_ASLE_OFFSET;
 		opregion_enable_asle(dev);
 	}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 1687edf..974b3cf 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -140,6 +140,7 @@
 #define MI_NOOP			MI_INSTR(0, 0)
 #define MI_USER_INTERRUPT	MI_INSTR(0x02, 0)
 #define MI_WAIT_FOR_EVENT       MI_INSTR(0x03, 0)
+#define   MI_WAIT_FOR_OVERLAY_FLIP	(1<<16)
 #define   MI_WAIT_FOR_PLANE_B_FLIP      (1<<6)
 #define   MI_WAIT_FOR_PLANE_A_FLIP      (1<<2)
 #define   MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
@@ -151,7 +152,13 @@
 #define   MI_END_SCENE		(1 << 4) /* flush binner and incr scene count */
 #define MI_BATCH_BUFFER_END	MI_INSTR(0x0a, 0)
 #define MI_REPORT_HEAD		MI_INSTR(0x07, 0)
+#define MI_OVERLAY_FLIP		MI_INSTR(0x11, 0)
+#define   MI_OVERLAY_CONTINUE	(0x0<<21)
+#define   MI_OVERLAY_ON		(0x1<<21)
+#define   MI_OVERLAY_OFF	(0x2<<21)
 #define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
+#define MI_DISPLAY_FLIP		MI_INSTR(0x14, 2)
+#define   MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
 #define MI_STORE_DWORD_IMM	MI_INSTR(0x20, 1)
 #define   MI_MEM_VIRTUAL	(1 << 22) /* 965+ only */
 #define MI_STORE_DWORD_INDEX	MI_INSTR(0x21, 1)
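
How these MI_* values compose: MI_INSTR() in i915_reg.h packs the opcode into bits 28:23 and the flags into the low bits, so a complete flip command is a single OR. A standalone sketch:

#include <stdint.h>
#include <stdio.h>

#define MI_INSTR(opcode, flags)	(((uint32_t)(opcode) << 23) | (flags))
#define MI_DISPLAY_FLIP		MI_INSTR(0x14, 2)	/* 2 = payload dword count */
#define MI_DISPLAY_FLIP_PLANE(n)	((uint32_t)(n) << 20)

int main(void)
{
	/* flip display plane B: opcode 0x14, plane select at bit 20 */
	uint32_t cmd = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(1);
	printf("MI_DISPLAY_FLIP(plane B) = 0x%08x\n", (unsigned)cmd);
	return 0;
}
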
@@ -260,6 +267,8 @@
 #define HWS_PGA		0x02080
 #define HWS_ADDRESS_MASK	0xfffff000
 #define HWS_START_ADDRESS_SHIFT	4
+#define PWRCTXA		0x2088 /* 965GM+ only */
+#define   PWRCTX_EN	(1<<0)
 #define IPEIR		0x02088
 #define IPEHR		0x0208c
 #define INSTDONE	0x02090
@@ -405,6 +414,13 @@
 # define GPIO_DATA_VAL_IN		(1 << 12)
 # define GPIO_DATA_PULLUP_DISABLE	(1 << 13)
 
+#define GMBUS0			0x5100
+#define GMBUS1			0x5104
+#define GMBUS2			0x5108
+#define GMBUS3			0x510c
+#define GMBUS4			0x5110
+#define GMBUS5			0x5120
+
 /*
  * Clock control & power management
  */
@@ -435,7 +451,7 @@
 #define   DPLLB_LVDS_P2_CLOCK_DIV_7	(1 << 24) /* i915 */
 #define   DPLL_P2_CLOCK_DIV_MASK	0x03000000 /* i915 */
 #define   DPLL_FPA01_P1_POST_DIV_MASK	0x00ff0000 /* i915 */
-#define   DPLL_FPA01_P1_POST_DIV_MASK_IGD	0x00ff8000 /* IGD */
+#define   DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW	0x00ff8000 /* Pineview */
 
 #define I915_FIFO_UNDERRUN_STATUS		(1UL<<31)
 #define I915_CRC_ERROR_ENABLE			(1UL<<29)
@@ -512,7 +528,7 @@
  */
 #define   DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS	0x003f0000
 #define   DPLL_FPA01_P1_POST_DIV_SHIFT	16
-#define   DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15
+#define   DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15
 /* i830, required in DVO non-gang */
 #define   PLL_P2_DIVIDE_BY_4		(1 << 23)
 #define   PLL_P1_DIVIDE_BY_TWO		(1 << 21) /* i830 */
@@ -522,7 +538,7 @@
 #define   PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
 #define   PLL_REF_INPUT_MASK		(3 << 13)
 #define   PLL_LOAD_PULSE_PHASE_SHIFT		9
-/* IGDNG */
+/* Ironlake */
 # define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT     9
 # define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK      (7 << 9)
 # define PLL_REF_SDVO_HDMI_MULTIPLIER(x)	(((x)-1) << 9)
@@ -586,12 +602,12 @@
 #define FPB0	0x06048
 #define FPB1	0x0604c
 #define   FP_N_DIV_MASK		0x003f0000
-#define   FP_N_IGD_DIV_MASK	0x00ff0000
+#define   FP_N_PINEVIEW_DIV_MASK	0x00ff0000
 #define   FP_N_DIV_SHIFT		16
 #define   FP_M1_DIV_MASK	0x00003f00
 #define   FP_M1_DIV_SHIFT		 8
 #define   FP_M2_DIV_MASK	0x0000003f
-#define   FP_M2_IGD_DIV_MASK	0x000000ff
+#define   FP_M2_PINEVIEW_DIV_MASK	0x000000ff
 #define   FP_M2_DIV_SHIFT		 0
 #define DPLL_TEST	0x606c
 #define   DPLLB_TEST_SDVO_DIV_1		(0 << 22)
@@ -769,7 +785,8 @@
 
 /** GM965 GM45 render standby register */
 #define MCHBAR_RENDER_STANDBY	0x111B8
-
+#define   RCX_SW_EXIT		(1<<23)
+#define   RSX_STATUS_MASK	0x00700000
 #define PEG_BAND_GAP_DATA	0x14d68
 
 /*
@@ -844,7 +861,6 @@
 #define   SDVOB_HOTPLUG_INT_EN			(1 << 26)
 #define   SDVOC_HOTPLUG_INT_EN			(1 << 25)
 #define   TV_HOTPLUG_INT_EN			(1 << 18)
-#define   CRT_EOS_INT_EN			(1 << 10)
 #define   CRT_HOTPLUG_INT_EN			(1 << 9)
 #define   CRT_HOTPLUG_FORCE_DETECT		(1 << 3)
 #define CRT_HOTPLUG_ACTIVATION_PERIOD_32	(0 << 8)
@@ -868,7 +884,6 @@
 			 HDMID_HOTPLUG_INT_EN |	  \
 			 SDVOB_HOTPLUG_INT_EN |	  \
 			 SDVOC_HOTPLUG_INT_EN |	  \
-			 TV_HOTPLUG_INT_EN |	  \
 			 CRT_HOTPLUG_INT_EN)
 
 
@@ -879,7 +894,6 @@
 #define   DPC_HOTPLUG_INT_STATUS		(1 << 28)
 #define   HDMID_HOTPLUG_INT_STATUS		(1 << 27)
 #define   DPD_HOTPLUG_INT_STATUS		(1 << 27)
-#define   CRT_EOS_INT_STATUS			(1 << 12)
 #define   CRT_HOTPLUG_INT_STATUS		(1 << 11)
 #define   TV_HOTPLUG_INT_STATUS			(1 << 10)
 #define   CRT_HOTPLUG_MONITOR_MASK		(3 << 8)
@@ -1620,7 +1634,7 @@
 #define   DP_CLOCK_OUTPUT_ENABLE	(1 << 13)
 
 #define   DP_SCRAMBLING_DISABLE		(1 << 12)
-#define   DP_SCRAMBLING_DISABLE_IGDNG	(1 << 7)
+#define   DP_SCRAMBLING_DISABLE_IRONLAKE	(1 << 7)
 
 /** limit RGB values to avoid confusing TVs */
 #define   DP_COLOR_RANGE_16_235		(1 << 8)
@@ -1808,7 +1822,7 @@
 #define DSPFW3			0x7003c
 #define   DSPFW_HPLL_SR_EN	(1<<31)
 #define   DSPFW_CURSOR_SR_SHIFT	24
-#define   IGD_SELF_REFRESH_EN	(1<<30)
+#define   PINEVIEW_SELF_REFRESH_EN	(1<<30)
 
 /* FIFO watermark sizes etc */
 #define G4X_FIFO_LINE_SIZE	64
@@ -1824,16 +1838,16 @@
 #define G4X_MAX_WM		0x3f
 #define I915_MAX_WM		0x3f
 
-#define IGD_DISPLAY_FIFO	512 /* in 64byte unit */
-#define IGD_FIFO_LINE_SIZE	64
-#define IGD_MAX_WM		0x1ff
-#define IGD_DFT_WM		0x3f
-#define IGD_DFT_HPLLOFF_WM	0
-#define IGD_GUARD_WM		10
-#define IGD_CURSOR_FIFO		64
-#define IGD_CURSOR_MAX_WM	0x3f
-#define IGD_CURSOR_DFT_WM	0
-#define IGD_CURSOR_GUARD_WM	5
+#define PINEVIEW_DISPLAY_FIFO	512 /* in 64byte unit */
+#define PINEVIEW_FIFO_LINE_SIZE	64
+#define PINEVIEW_MAX_WM		0x1ff
+#define PINEVIEW_DFT_WM		0x3f
+#define PINEVIEW_DFT_HPLLOFF_WM	0
+#define PINEVIEW_GUARD_WM		10
+#define PINEVIEW_CURSOR_FIFO		64
+#define PINEVIEW_CURSOR_MAX_WM	0x3f
+#define PINEVIEW_CURSOR_DFT_WM	0
+#define PINEVIEW_CURSOR_GUARD_WM	5
 
 /*
  * The two pipe frame counter registers are not synchronized, so
@@ -1907,6 +1921,7 @@
 #define   DISPPLANE_16BPP			(0x5<<26)
 #define   DISPPLANE_32BPP_NO_ALPHA		(0x6<<26)
 #define   DISPPLANE_32BPP			(0x7<<26)
+#define   DISPPLANE_32BPP_30BIT_NO_ALPHA	(0xa<<26)
 #define   DISPPLANE_STEREO_ENABLE		(1<<25)
 #define   DISPPLANE_STEREO_DISABLE		0
 #define   DISPPLANE_SEL_PIPE_MASK		(1<<24)
@@ -1918,7 +1933,7 @@
 #define   DISPPLANE_NO_LINE_DOUBLE		0
 #define   DISPPLANE_STEREO_POLARITY_FIRST	0
 #define   DISPPLANE_STEREO_POLARITY_SECOND	(1<<18)
-#define   DISPPLANE_TRICKLE_FEED_DISABLE	(1<<14) /* IGDNG */
+#define   DISPPLANE_TRICKLE_FEED_DISABLE	(1<<14) /* Ironlake */
 #define   DISPPLANE_TILED			(1<<10)
 #define DSPAADDR		0x70184
 #define DSPASTRIDE		0x70188
@@ -1971,7 +1986,7 @@
 # define VGA_2X_MODE				(1 << 30)
 # define VGA_PIPE_B_SELECT			(1 << 29)
 
-/* IGDNG */
+/* Ironlake */
 
 #define CPU_VGACNTRL	0x41000
 
@@ -2117,6 +2132,7 @@
 #define SDE_PORTC_HOTPLUG       (1 << 9)
 #define SDE_PORTB_HOTPLUG       (1 << 8)
 #define SDE_SDVOB_HOTPLUG       (1 << 6)
+#define SDE_HOTPLUG_MASK	(0xf << 8)
 
 #define SDEISR  0xc4000
 #define SDEIMR  0xc4004
@@ -2157,6 +2173,13 @@
 #define PCH_GPIOE               0xc5020
 #define PCH_GPIOF               0xc5024
 
+#define PCH_GMBUS0		0xc5100
+#define PCH_GMBUS1		0xc5104
+#define PCH_GMBUS2		0xc5108
+#define PCH_GMBUS3		0xc510c
+#define PCH_GMBUS4		0xc5110
+#define PCH_GMBUS5		0xc5120
+
 #define PCH_DPLL_A              0xc6014
 #define PCH_DPLL_B              0xc6018
 
@@ -2292,7 +2315,7 @@
 #define  FDI_DP_PORT_WIDTH_X3           (2<<19)
 #define  FDI_DP_PORT_WIDTH_X4           (3<<19)
 #define  FDI_TX_ENHANCE_FRAME_ENABLE    (1<<18)
-/* IGDNG: hardwired to 1 */
+/* Ironlake: hardwired to 1 */
 #define  FDI_TX_PLL_ENABLE              (1<<14)
 /* both Tx and Rx */
 #define  FDI_SCRAMBLING_ENABLE          (0<<7)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 6eec817..d5ebb00 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -27,14 +27,14 @@
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
-#include "i915_drv.h"
+#include "intel_drv.h"
 
 static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32	dpll_reg;
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
 	} else {
 		dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
@@ -53,7 +53,7 @@
 	if (!i915_pipe_enabled(dev, pipe))
 		return;
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
 
 	if (pipe == PIPE_A)
@@ -75,7 +75,7 @@
 	if (!i915_pipe_enabled(dev, pipe))
 		return;
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
 
 	if (pipe == PIPE_A)
@@ -239,7 +239,7 @@
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
 		dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
 	}
@@ -247,7 +247,7 @@
 	/* Pipe & plane A info */
 	dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
 	dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
 		dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
 		dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
@@ -256,7 +256,7 @@
 		dev_priv->saveFPA1 = I915_READ(FPA1);
 		dev_priv->saveDPLL_A = I915_READ(DPLL_A);
 	}
-	if (IS_I965G(dev) && !IS_IGDNG(dev))
+	if (IS_I965G(dev) && !IS_IRONLAKE(dev))
 		dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
 	dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
 	dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
@@ -264,10 +264,10 @@
 	dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
 	dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
 	dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
-	if (!IS_IGDNG(dev))
+	if (!IS_IRONLAKE(dev))
 		dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
 		dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
 		dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
@@ -304,7 +304,7 @@
 	/* Pipe & plane B info */
 	dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
 	dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
 		dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
 		dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
@@ -313,7 +313,7 @@
 		dev_priv->saveFPB1 = I915_READ(FPB1);
 		dev_priv->saveDPLL_B = I915_READ(DPLL_B);
 	}
-	if (IS_I965G(dev) && !IS_IGDNG(dev))
+	if (IS_I965G(dev) && !IS_IRONLAKE(dev))
 		dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
 	dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
 	dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
@@ -321,10 +321,10 @@
 	dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
 	dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
 	dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
-	if (!IS_IGDNG(dev))
+	if (!IS_IRONLAKE(dev))
 		dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
 		dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
 		dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
@@ -369,7 +369,7 @@
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dpll_a_reg = PCH_DPLL_A;
 		dpll_b_reg = PCH_DPLL_B;
 		fpa0_reg = PCH_FPA0;
@@ -385,7 +385,7 @@
 		fpb1_reg = FPB1;
 	}
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
 		I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
 	}
@@ -402,7 +402,7 @@
 	/* Actually enable it */
 	I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
 	DRM_UDELAY(150);
-	if (IS_I965G(dev) && !IS_IGDNG(dev))
+	if (IS_I965G(dev) && !IS_IRONLAKE(dev))
 		I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
 	DRM_UDELAY(150);
 
@@ -413,10 +413,10 @@
 	I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
 	I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
 	I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
-	if (!IS_IGDNG(dev))
+	if (!IS_IRONLAKE(dev))
 		I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
 		I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
 		I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
@@ -467,7 +467,7 @@
 	/* Actually enable it */
 	I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
 	DRM_UDELAY(150);
-	if (IS_I965G(dev) && !IS_IGDNG(dev))
+	if (IS_I965G(dev) && !IS_IRONLAKE(dev))
 		I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
 	DRM_UDELAY(150);
 
@@ -478,10 +478,10 @@
 	I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
 	I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
 	I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
-	if (!IS_IGDNG(dev))
+	if (!IS_IRONLAKE(dev))
 		I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
 		I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
 		I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
@@ -546,14 +546,14 @@
 		dev_priv->saveCURSIZE = I915_READ(CURSIZE);
 
 	/* CRT state */
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->saveADPA = I915_READ(PCH_ADPA);
 	} else {
 		dev_priv->saveADPA = I915_READ(ADPA);
 	}
 
 	/* LVDS state */
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
 		dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
 		dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
@@ -571,10 +571,10 @@
 			dev_priv->saveLVDS = I915_READ(LVDS);
 	}
 
-	if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev))
+	if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
 		dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
 		dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
 		dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
@@ -614,7 +614,7 @@
 	dev_priv->saveVGA0 = I915_READ(VGA0);
 	dev_priv->saveVGA1 = I915_READ(VGA1);
 	dev_priv->saveVGA_PD = I915_READ(VGA_PD);
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
 	else
 		dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
@@ -656,24 +656,24 @@
 		I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
 
 	/* CRT state */
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
 	else
 		I915_WRITE(ADPA, dev_priv->saveADPA);
 
 	/* LVDS state */
-	if (IS_I965G(dev) && !IS_IGDNG(dev))
+	if (IS_I965G(dev) && !IS_IRONLAKE(dev))
 		I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
 	} else if (IS_MOBILE(dev) && !IS_I830(dev))
 		I915_WRITE(LVDS, dev_priv->saveLVDS);
 
-	if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev))
+	if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
 		I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
 		I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
 		I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
@@ -713,7 +713,7 @@
 	}
 
 	/* VGA state */
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
 	else
 		I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
@@ -733,8 +733,10 @@
 	pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
 
 	/* Render Standby */
-	if (IS_I965G(dev) && IS_MOBILE(dev))
+	if (I915_HAS_RC6(dev)) {
 		dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
+		dev_priv->savePWRCTXA = I915_READ(PWRCTXA);
+	}
 
 	/* Hardware status page */
 	dev_priv->saveHWS = I915_READ(HWS_PGA);
@@ -742,7 +744,7 @@
 	i915_save_display(dev);
 
 	/* Interrupt state */
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->saveDEIER = I915_READ(DEIER);
 		dev_priv->saveDEIMR = I915_READ(DEIMR);
 		dev_priv->saveGTIER = I915_READ(GTIER);
@@ -754,10 +756,6 @@
 		dev_priv->saveIMR = I915_READ(IMR);
 	}
 
-	/* Clock gating state */
-	dev_priv->saveD_STATE = I915_READ(D_STATE);
-	dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D); /* Not sure about this */
-
 	/* Cache mode state */
 	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
 
@@ -796,8 +794,10 @@
 	pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
 
 	/* Render Standby */
-	if (IS_I965G(dev) && IS_MOBILE(dev))
+	if (I915_HAS_RC6(dev)) {
 		I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
+		I915_WRITE(PWRCTXA, dev_priv->savePWRCTXA);
+	}
 
 	/* Hardware status page */
 	I915_WRITE(HWS_PGA, dev_priv->saveHWS);
@@ -817,7 +817,7 @@
 	i915_restore_display(dev);
 
 	/* Interrupt state */
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		I915_WRITE(DEIER, dev_priv->saveDEIER);
 		I915_WRITE(DEIMR, dev_priv->saveDEIMR);
 		I915_WRITE(GTIER, dev_priv->saveGTIER);
@@ -830,8 +830,7 @@
 	}
 
 	/* Clock gating state */
-	I915_WRITE (D_STATE, dev_priv->saveD_STATE);
-	I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
+	intel_init_clock_gating(dev);
 
 	/* Cache mode state */
 	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
@@ -846,6 +845,9 @@
 	for (i = 0; i < 3; i++)
 		I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
 
+	/* I2C state */
+	intel_i2c_reset_gmbus(dev);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 96cd256..f275677 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -114,6 +114,8 @@
 	struct lvds_dvo_timing *dvo_timing;
 	struct drm_display_mode *panel_fixed_mode;
 	int lfp_data_size, dvo_timing_offset;
+	int i, temp_downclock;
+	struct drm_display_mode *temp_mode;
 
 	/* Defaults if we can't find VBT info */
 	dev_priv->lvds_dither = 0;
@@ -159,9 +161,49 @@
 
 	dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
 
-	DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
+	DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
 	drm_mode_debug_printmodeline(panel_fixed_mode);
 
+	temp_mode = kzalloc(sizeof(*temp_mode), GFP_KERNEL);
+	temp_downclock = panel_fixed_mode->clock;
+	/*
+	 * Enumerate the LVDS panel timing entries in the VBT to check
+	 * whether a lower-clock (downclock) variant of the panel mode exists.
+	 */
+	for (i = 0; i < 16; i++) {
+		entry = (struct bdb_lvds_lfp_data_entry *)
+			((uint8_t *)lvds_lfp_data->data + (lfp_data_size * i));
+		dvo_timing = (struct lvds_dvo_timing *)
+			((unsigned char *)entry + dvo_timing_offset);
+
+		fill_detail_timing_data(temp_mode, dvo_timing);
+
+		if (temp_mode->hdisplay == panel_fixed_mode->hdisplay &&
+		temp_mode->hsync_start == panel_fixed_mode->hsync_start &&
+		temp_mode->hsync_end == panel_fixed_mode->hsync_end &&
+		temp_mode->htotal == panel_fixed_mode->htotal &&
+		temp_mode->vdisplay == panel_fixed_mode->vdisplay &&
+		temp_mode->vsync_start == panel_fixed_mode->vsync_start &&
+		temp_mode->vsync_end == panel_fixed_mode->vsync_end &&
+		temp_mode->vtotal == panel_fixed_mode->vtotal &&
+		temp_mode->clock < temp_downclock) {
+			/*
+			 * A matching mode with a lower clock was found;
+			 * keep the lowest such clock as the downclock.
+			 */
+			temp_downclock = temp_mode->clock;
+		}
+		/* clear it for the next VBT entry */
+		memset(temp_mode, 0, sizeof(*temp_mode));
+	}
+	kfree(temp_mode);
+	if (temp_downclock < panel_fixed_mode->clock) {
+		dev_priv->lvds_downclock_avail = 1;
+		dev_priv->lvds_downclock = temp_downclock;
+		DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
+				"Normal Clock %dKHz, downclock %dKHz\n",
+				panel_fixed_mode->clock, temp_downclock);
+	}
 	return;
 }
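
The scan above boils down to: among VBT entries whose timings exactly match the fixed panel mode, keep the lowest pixel clock. A standalone sketch of that selection (the struct and names are illustrative, not the kernel's types):

struct mode_timings {
	int hdisplay, hsync_start, hsync_end, htotal;
	int vdisplay, vsync_start, vsync_end, vtotal;
	int clock; /* kHz */
};

static int timings_match(const struct mode_timings *a,
			 const struct mode_timings *b)
{
	return a->hdisplay == b->hdisplay && a->hsync_start == b->hsync_start &&
	       a->hsync_end == b->hsync_end && a->htotal == b->htotal &&
	       a->vdisplay == b->vdisplay && a->vsync_start == b->vsync_start &&
	       a->vsync_end == b->vsync_end && a->vtotal == b->vtotal;
}

static int find_downclock(const struct mode_timings *fixed,
			  const struct mode_timings *entries, int n)
{
	int best = fixed->clock;
	for (int i = 0; i < n; i++)
		if (timings_match(&entries[i], fixed) && entries[i].clock < best)
			best = entries[i].clock;
	return best; /* lower than fixed->clock iff a downclock exists */
}
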
 
@@ -217,7 +259,7 @@
 			if (IS_I85X(dev_priv->dev))
 				dev_priv->lvds_ssc_freq =
 					general->ssc_freq ? 66 : 48;
-			else if (IS_IGDNG(dev_priv->dev))
+			else if (IS_IRONLAKE(dev_priv->dev))
 				dev_priv->lvds_ssc_freq =
 					general->ssc_freq ? 100 : 120;
 			else
@@ -241,22 +283,18 @@
 		GPIOF,
 	};
 
-	/* Set sensible defaults in case we can't find the general block
-	   or it is the wrong chipset */
-	dev_priv->crt_ddc_bus = -1;
-
 	general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
 	if (general) {
 		u16 block_size = get_blocksize(general);
 		if (block_size >= sizeof(*general)) {
 			int bus_pin = general->crt_ddc_gmbus_pin;
-			DRM_DEBUG("crt_ddc_bus_pin: %d\n", bus_pin);
+			DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
 			if ((bus_pin >= 1) && (bus_pin <= 6)) {
 				dev_priv->crt_ddc_bus =
 					crt_bus_map_table[bus_pin-1];
 			}
 		} else {
-			DRM_DEBUG("BDB_GD too small (%d). Invalid.\n",
+			DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
 				  block_size);
 		}
 	}
@@ -274,7 +312,7 @@
 
 	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
 	if (!p_defs) {
-		DRM_DEBUG("No general definition block is found\n");
+		DRM_DEBUG_KMS("No general definition block is found\n");
 		return;
 	}
 	/* Check whether the size of the child device entry meets the requirements.
@@ -284,7 +322,7 @@
 	 */
 	if (p_defs->child_dev_size != sizeof(*p_child)) {
 		/* different child dev size. Ignore it. */
-		DRM_DEBUG("different child size is found. Invalid.\n");
+		DRM_DEBUG_KMS("different child size is found. Invalid.\n");
 		return;
 	}
 	/* get the block size of general definitions */
@@ -310,11 +348,11 @@
 		if (p_child->dvo_port != DEVICE_PORT_DVOB &&
 			p_child->dvo_port != DEVICE_PORT_DVOC) {
 			/* skip the incorrect SDVO port */
-			DRM_DEBUG("Incorrect SDVO port. Skip it \n");
+			DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
 			continue;
 		}
-		DRM_DEBUG("the SDVO device with slave addr %2x is found on "
-				"%s port\n",
+		DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
+				" %s port\n",
 				p_child->slave_addr,
 				(p_child->dvo_port == DEVICE_PORT_DVOB) ?
 					"SDVOB" : "SDVOC");
@@ -325,21 +363,21 @@
 			p_mapping->dvo_wiring = p_child->dvo_wiring;
 			p_mapping->initialized = 1;
 		} else {
-			DRM_DEBUG("Maybe one SDVO port is shared by "
+			DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
 					 "two SDVO device.\n");
 		}
 		if (p_child->slave2_addr) {
 			/* Maybe this is a SDVO device with multiple inputs */
 			/* And the mapping info is not added */
-			DRM_DEBUG("there exists the slave2_addr. Maybe this "
-				"is a SDVO device with multiple inputs.\n");
+			DRM_DEBUG_KMS("slave2_addr is set; this may be an SDVO"
+				" device with multiple inputs.\n");
 		}
 		count++;
 	}
 
 	if (!count) {
 		/* No SDVO device info is found */
-		DRM_DEBUG("No SDVO device info is found in VBT\n");
+		DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
 	}
 	return;
 }
@@ -366,6 +404,70 @@
 		dev_priv->render_reclock_avail = true;
 }
 
+static void
+parse_device_mapping(struct drm_i915_private *dev_priv,
+		       struct bdb_header *bdb)
+{
+	struct bdb_general_definitions *p_defs;
+	struct child_device_config *p_child, *child_dev_ptr;
+	int i, child_device_num, count;
+	u16	block_size;
+
+	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+	if (!p_defs) {
+		DRM_DEBUG_KMS("No general definition block is found\n");
+		return;
+	}
+	/* Check whether the size of the child device entry meets the
+	 * requirement: if the child device size obtained from the general
+	 * definitions block differs from sizeof(struct child_device_config),
+	 * skip parsing the device mapping.
+	 */
+	if (p_defs->child_dev_size != sizeof(*p_child)) {
+		/* different child dev size. Ignore it. */
+		DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+		return;
+	}
+	/* get the block size of general definitions */
+	block_size = get_blocksize(p_defs);
+	/* get the number of child device */
+	child_device_num = (block_size - sizeof(*p_defs)) /
+				sizeof(*p_child);
+	count = 0;
+	/* get the number of child device that is present */
+	for (i = 0; i < child_device_num; i++) {
+		p_child = &(p_defs->devices[i]);
+		if (!p_child->device_type) {
+			/* skip the device block if device type is invalid */
+			continue;
+		}
+		count++;
+	}
+	if (!count) {
+		DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
+		return;
+	}
+	dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL);
+	if (!dev_priv->child_dev) {
+		DRM_DEBUG_KMS("No memory space for child device\n");
+		return;
+	}
+
+	dev_priv->child_dev_num = count;
+	count = 0;
+	for (i = 0; i < child_device_num; i++) {
+		p_child = &(p_defs->devices[i]);
+		if (!p_child->device_type) {
+			/* skip the device block if device type is invalid */
+			continue;
+		}
+		child_dev_ptr = dev_priv->child_dev + count;
+		count++;
+		memcpy((void *)child_dev_ptr, (void *)p_child,
+					sizeof(*p_child));
+	}
+	return;
+}
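
parse_device_mapping() above uses a classic two-pass shape: count the valid entries, allocate once, then copy on the second pass. A userspace sketch of the same pattern (calloc stands in for kzalloc; the struct is illustrative):

#include <stdlib.h>
#include <string.h>

struct child_cfg { unsigned short device_type; /* ... */ };

static struct child_cfg *copy_present_children(const struct child_cfg *tbl,
					       int total, int *out_n)
{
	int i, count = 0;
	struct child_cfg *out;

	for (i = 0; i < total; i++)		/* pass 1: count valid entries */
		if (tbl[i].device_type)
			count++;
	if (!count)
		return NULL;

	out = calloc(count, sizeof(*out));	/* kzalloc() in the kernel */
	if (!out)
		return NULL;

	*out_n = count;
	count = 0;
	for (i = 0; i < total; i++)		/* pass 2: copy valid entries */
		if (tbl[i].device_type)
			memcpy(&out[count++], &tbl[i], sizeof(*out));
	return out;
}
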
 /**
  * intel_init_bios - initialize VBIOS settings & find VBT
  * @dev: DRM device
@@ -417,6 +519,7 @@
 	parse_lfp_panel_data(dev_priv, bdb);
 	parse_sdvo_panel_data(dev_priv, bdb);
 	parse_sdvo_device_mapping(dev_priv, bdb);
+	parse_device_mapping(dev_priv, bdb);
 	parse_driver_features(dev_priv, bdb);
 
 	pci_unmap_rom(pdev, bios);
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 0f8e5f6..425ac9d 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -549,4 +549,21 @@
 #define   SWF14_APM_STANDBY	0x1
 #define   SWF14_APM_RESTORE	0x0
 
+/* Add the device class for LFP, TV, HDMI */
+#define	 DEVICE_TYPE_INT_LFP	0x1022
+#define	 DEVICE_TYPE_INT_TV	0x1009
+#define	 DEVICE_TYPE_HDMI	0x60D2
+#define	 DEVICE_TYPE_DP		0x68C6
+#define	 DEVICE_TYPE_eDP	0x78C6
+
+/* define the DVO port for HDMI output type */
+#define		DVO_B		1
+#define		DVO_C		2
+#define		DVO_D		3
+
+/* define the PORT for DP output type */
+#define		PORT_IDPB	7
+#define		PORT_IDPC	8
+#define		PORT_IDPD	9
+
 #endif /* _I830_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e505144..9f3d3e5 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -39,7 +39,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 temp, reg;
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		reg = PCH_ADPA;
 	else
 		reg = ADPA;
@@ -64,34 +64,6 @@
 	}
 
 	I915_WRITE(reg, temp);
-
-	if (IS_IGD(dev)) {
-		if (mode == DRM_MODE_DPMS_OFF) {
-			/* turn off DAC */
-			temp = I915_READ(PORT_HOTPLUG_EN);
-			temp &= ~CRT_EOS_INT_EN;
-			I915_WRITE(PORT_HOTPLUG_EN, temp);
-
-			temp = I915_READ(PORT_HOTPLUG_STAT);
-			if (temp & CRT_EOS_INT_STATUS)
-				I915_WRITE(PORT_HOTPLUG_STAT,
-					CRT_EOS_INT_STATUS);
-		} else {
-			/* turn on DAC. EOS interrupt must be enabled after DAC
-			 * is enabled, so it sounds not good to enable it in
-			 * i915_driver_irq_postinstall()
-			 * wait 12.5ms after DAC is enabled
-			 */
-			msleep(13);
-			temp = I915_READ(PORT_HOTPLUG_STAT);
-			if (temp & CRT_EOS_INT_STATUS)
-				I915_WRITE(PORT_HOTPLUG_STAT,
-					CRT_EOS_INT_STATUS);
-			temp = I915_READ(PORT_HOTPLUG_EN);
-			temp |= CRT_EOS_INT_EN;
-			I915_WRITE(PORT_HOTPLUG_EN, temp);
-		}
-	}
 }
 
 static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -141,7 +113,7 @@
 	else
 		dpll_md_reg = DPLL_B_MD;
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		adpa_reg = PCH_ADPA;
 	else
 		adpa_reg = ADPA;
@@ -150,7 +122,7 @@
 	 * Disable separate mode multiplier used when cloning SDVO to CRT
 	 * XXX this needs to be adjusted when we really are cloning
 	 */
-	if (IS_I965G(dev) && !IS_IGDNG(dev)) {
+	if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
 		dpll_md = I915_READ(dpll_md_reg);
 		I915_WRITE(dpll_md_reg,
 			   dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
@@ -164,18 +136,18 @@
 
 	if (intel_crtc->pipe == 0) {
 		adpa |= ADPA_PIPE_A_SELECT;
-		if (!IS_IGDNG(dev))
+		if (!IS_IRONLAKE(dev))
 			I915_WRITE(BCLRPAT_A, 0);
 	} else {
 		adpa |= ADPA_PIPE_B_SELECT;
-		if (!IS_IGDNG(dev))
+		if (!IS_IRONLAKE(dev))
 			I915_WRITE(BCLRPAT_B, 0);
 	}
 
 	I915_WRITE(adpa_reg, adpa);
 }
 
-static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
+static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -194,7 +166,7 @@
 			ADPA_CRT_HOTPLUG_ENABLE |
 			ADPA_CRT_HOTPLUG_FORCE_TRIGGER);
 
-	DRM_DEBUG("pch crt adpa 0x%x", adpa);
+	DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa);
 	I915_WRITE(PCH_ADPA, adpa);
 
 	while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
@@ -227,8 +199,8 @@
 	u32 hotplug_en;
 	int i, tries = 0;
 
-	if (IS_IGDNG(dev))
-		return intel_igdng_crt_detect_hotplug(connector);
+	if (IS_IRONLAKE(dev))
+		return intel_ironlake_crt_detect_hotplug(connector);
 
 	/*
 	 * On 4 series desktop, the CRT detect sequence needs to be done twice
@@ -549,12 +521,12 @@
 					  &intel_output->enc);
 
 	/* Set up the DDC bus. */
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		i2c_reg = PCH_GPIOA;
 	else {
 		i2c_reg = GPIOA;
 		/* Use VBT information for CRT DDC if available */
-		if (dev_priv->crt_ddc_bus != -1)
+		if (dev_priv->crt_ddc_bus != 0)
 			i2c_reg = dev_priv->crt_ddc_bus;
 	}
 	intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 099f420..52cd9b0 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -32,7 +32,7 @@
 #include "intel_drv.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
-#include "intel_dp.h"
+#include "drm_dp_helper.h"
 
 #include "drm_crtc_helper.h"
 
@@ -102,32 +102,32 @@
 #define I9XX_DOT_MAX		 400000
 #define I9XX_VCO_MIN		1400000
 #define I9XX_VCO_MAX		2800000
-#define IGD_VCO_MIN		1700000
-#define IGD_VCO_MAX		3500000
+#define PINEVIEW_VCO_MIN		1700000
+#define PINEVIEW_VCO_MAX		3500000
 #define I9XX_N_MIN		      1
 #define I9XX_N_MAX		      6
-/* IGD's Ncounter is a ring counter */
-#define IGD_N_MIN		      3
-#define IGD_N_MAX		      6
+/* Pineview's Ncounter is a ring counter */
+#define PINEVIEW_N_MIN		      3
+#define PINEVIEW_N_MAX		      6
 #define I9XX_M_MIN		     70
 #define I9XX_M_MAX		    120
-#define IGD_M_MIN		      2
-#define IGD_M_MAX		    256
+#define PINEVIEW_M_MIN		      2
+#define PINEVIEW_M_MAX		    256
 #define I9XX_M1_MIN		     10
 #define I9XX_M1_MAX		     22
 #define I9XX_M2_MIN		      5
 #define I9XX_M2_MAX		      9
-/* IGD M1 is reserved, and must be 0 */
-#define IGD_M1_MIN		      0
-#define IGD_M1_MAX		      0
-#define IGD_M2_MIN		      0
-#define IGD_M2_MAX		      254
+/* Pineview M1 is reserved, and must be 0 */
+#define PINEVIEW_M1_MIN		      0
+#define PINEVIEW_M1_MAX		      0
+#define PINEVIEW_M2_MIN		      0
+#define PINEVIEW_M2_MAX		      254
 #define I9XX_P_SDVO_DAC_MIN	      5
 #define I9XX_P_SDVO_DAC_MAX	     80
 #define I9XX_P_LVDS_MIN		      7
 #define I9XX_P_LVDS_MAX		     98
-#define IGD_P_LVDS_MIN		      7
-#define IGD_P_LVDS_MAX		     112
+#define PINEVIEW_P_LVDS_MIN		      7
+#define PINEVIEW_P_LVDS_MAX		     112
 #define I9XX_P1_MIN		      1
 #define I9XX_P1_MAX		      8
 #define I9XX_P2_SDVO_DAC_SLOW		     10
@@ -234,33 +234,33 @@
 #define G4X_P2_DISPLAY_PORT_FAST           10
 #define G4X_P2_DISPLAY_PORT_LIMIT          0
 
-/* IGDNG */
+/* Ironlake */
 /* The clock is calculated using (register_value + 2) for N/M1/M2,
    so the range values here are (actual_value - 2).
  */
-#define IGDNG_DOT_MIN         25000
-#define IGDNG_DOT_MAX         350000
-#define IGDNG_VCO_MIN         1760000
-#define IGDNG_VCO_MAX         3510000
-#define IGDNG_N_MIN           1
-#define IGDNG_N_MAX           5
-#define IGDNG_M_MIN           79
-#define IGDNG_M_MAX           118
-#define IGDNG_M1_MIN          12
-#define IGDNG_M1_MAX          23
-#define IGDNG_M2_MIN          5
-#define IGDNG_M2_MAX          9
-#define IGDNG_P_SDVO_DAC_MIN  5
-#define IGDNG_P_SDVO_DAC_MAX  80
-#define IGDNG_P_LVDS_MIN      28
-#define IGDNG_P_LVDS_MAX      112
-#define IGDNG_P1_MIN          1
-#define IGDNG_P1_MAX          8
-#define IGDNG_P2_SDVO_DAC_SLOW 10
-#define IGDNG_P2_SDVO_DAC_FAST 5
-#define IGDNG_P2_LVDS_SLOW    14 /* single channel */
-#define IGDNG_P2_LVDS_FAST    7  /* double channel */
-#define IGDNG_P2_DOT_LIMIT    225000 /* 225Mhz */
+#define IRONLAKE_DOT_MIN         25000
+#define IRONLAKE_DOT_MAX         350000
+#define IRONLAKE_VCO_MIN         1760000
+#define IRONLAKE_VCO_MAX         3510000
+#define IRONLAKE_N_MIN           1
+#define IRONLAKE_N_MAX           5
+#define IRONLAKE_M_MIN           79
+#define IRONLAKE_M_MAX           118
+#define IRONLAKE_M1_MIN          12
+#define IRONLAKE_M1_MAX          23
+#define IRONLAKE_M2_MIN          5
+#define IRONLAKE_M2_MAX          9
+#define IRONLAKE_P_SDVO_DAC_MIN  5
+#define IRONLAKE_P_SDVO_DAC_MAX  80
+#define IRONLAKE_P_LVDS_MIN      28
+#define IRONLAKE_P_LVDS_MAX      112
+#define IRONLAKE_P1_MIN          1
+#define IRONLAKE_P1_MAX          8
+#define IRONLAKE_P2_SDVO_DAC_SLOW 10
+#define IRONLAKE_P2_SDVO_DAC_FAST 5
+#define IRONLAKE_P2_LVDS_SLOW    14 /* single channel */
+#define IRONLAKE_P2_LVDS_FAST    7  /* double channel */
+#define IRONLAKE_P2_DOT_LIMIT    225000 /* 225Mhz */
 
 static bool
 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -272,15 +272,15 @@
 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 			int target, int refclk, intel_clock_t *best_clock);
 static bool
-intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-			int target, int refclk, intel_clock_t *best_clock);
+intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+			     int target, int refclk, intel_clock_t *best_clock);
 
 static bool
 intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
 		      int target, int refclk, intel_clock_t *best_clock);
 static bool
-intel_find_pll_igdng_dp(const intel_limit_t *, struct drm_crtc *crtc,
-		      int target, int refclk, intel_clock_t *best_clock);
+intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
+			   int target, int refclk, intel_clock_t *best_clock);
 
 static const intel_limit_t intel_limits_i8xx_dvo = {
         .dot = { .min = I8XX_DOT_MIN,		.max = I8XX_DOT_MAX },
@@ -453,13 +453,13 @@
         .find_pll = intel_find_pll_g4x_dp,
 };
 
-static const intel_limit_t intel_limits_igd_sdvo = {
+static const intel_limit_t intel_limits_pineview_sdvo = {
         .dot = { .min = I9XX_DOT_MIN,		.max = I9XX_DOT_MAX},
-        .vco = { .min = IGD_VCO_MIN,		.max = IGD_VCO_MAX },
-        .n   = { .min = IGD_N_MIN,		.max = IGD_N_MAX },
-        .m   = { .min = IGD_M_MIN,		.max = IGD_M_MAX },
-        .m1  = { .min = IGD_M1_MIN,		.max = IGD_M1_MAX },
-        .m2  = { .min = IGD_M2_MIN,		.max = IGD_M2_MAX },
+        .vco = { .min = PINEVIEW_VCO_MIN,		.max = PINEVIEW_VCO_MAX },
+        .n   = { .min = PINEVIEW_N_MIN,		.max = PINEVIEW_N_MAX },
+        .m   = { .min = PINEVIEW_M_MIN,		.max = PINEVIEW_M_MAX },
+        .m1  = { .min = PINEVIEW_M1_MIN,		.max = PINEVIEW_M1_MAX },
+        .m2  = { .min = PINEVIEW_M2_MIN,		.max = PINEVIEW_M2_MAX },
         .p   = { .min = I9XX_P_SDVO_DAC_MIN,    .max = I9XX_P_SDVO_DAC_MAX },
         .p1  = { .min = I9XX_P1_MIN,		.max = I9XX_P1_MAX },
 	.p2  = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
@@ -468,59 +468,59 @@
 	.find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
-static const intel_limit_t intel_limits_igd_lvds = {
+static const intel_limit_t intel_limits_pineview_lvds = {
         .dot = { .min = I9XX_DOT_MIN,		.max = I9XX_DOT_MAX },
-        .vco = { .min = IGD_VCO_MIN,		.max = IGD_VCO_MAX },
-        .n   = { .min = IGD_N_MIN,		.max = IGD_N_MAX },
-        .m   = { .min = IGD_M_MIN,		.max = IGD_M_MAX },
-        .m1  = { .min = IGD_M1_MIN,		.max = IGD_M1_MAX },
-        .m2  = { .min = IGD_M2_MIN,		.max = IGD_M2_MAX },
-        .p   = { .min = IGD_P_LVDS_MIN,	.max = IGD_P_LVDS_MAX },
+        .vco = { .min = PINEVIEW_VCO_MIN,		.max = PINEVIEW_VCO_MAX },
+        .n   = { .min = PINEVIEW_N_MIN,		.max = PINEVIEW_N_MAX },
+        .m   = { .min = PINEVIEW_M_MIN,		.max = PINEVIEW_M_MAX },
+        .m1  = { .min = PINEVIEW_M1_MIN,		.max = PINEVIEW_M1_MAX },
+        .m2  = { .min = PINEVIEW_M2_MIN,		.max = PINEVIEW_M2_MAX },
+        .p   = { .min = PINEVIEW_P_LVDS_MIN,	.max = PINEVIEW_P_LVDS_MAX },
         .p1  = { .min = I9XX_P1_MIN,		.max = I9XX_P1_MAX },
-	/* IGD only supports single-channel mode. */
+	/* Pineview only supports single-channel mode. */
 	.p2  = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
 		 .p2_slow = I9XX_P2_LVDS_SLOW,	.p2_fast = I9XX_P2_LVDS_SLOW },
 	.find_pll = intel_find_best_PLL,
 	.find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
-static const intel_limit_t intel_limits_igdng_sdvo = {
-	.dot = { .min = IGDNG_DOT_MIN,          .max = IGDNG_DOT_MAX },
-	.vco = { .min = IGDNG_VCO_MIN,          .max = IGDNG_VCO_MAX },
-	.n   = { .min = IGDNG_N_MIN,            .max = IGDNG_N_MAX },
-	.m   = { .min = IGDNG_M_MIN,            .max = IGDNG_M_MAX },
-	.m1  = { .min = IGDNG_M1_MIN,           .max = IGDNG_M1_MAX },
-	.m2  = { .min = IGDNG_M2_MIN,           .max = IGDNG_M2_MAX },
-	.p   = { .min = IGDNG_P_SDVO_DAC_MIN,   .max = IGDNG_P_SDVO_DAC_MAX },
-	.p1  = { .min = IGDNG_P1_MIN,           .max = IGDNG_P1_MAX },
-	.p2  = { .dot_limit = IGDNG_P2_DOT_LIMIT,
-		 .p2_slow = IGDNG_P2_SDVO_DAC_SLOW,
-		 .p2_fast = IGDNG_P2_SDVO_DAC_FAST },
-	.find_pll = intel_igdng_find_best_PLL,
+static const intel_limit_t intel_limits_ironlake_sdvo = {
+	.dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
+	.vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
+	.n   = { .min = IRONLAKE_N_MIN,            .max = IRONLAKE_N_MAX },
+	.m   = { .min = IRONLAKE_M_MIN,            .max = IRONLAKE_M_MAX },
+	.m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
+	.m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
+	.p   = { .min = IRONLAKE_P_SDVO_DAC_MIN,   .max = IRONLAKE_P_SDVO_DAC_MAX },
+	.p1  = { .min = IRONLAKE_P1_MIN,           .max = IRONLAKE_P1_MAX },
+	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+		 .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
+		 .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
+	.find_pll = intel_ironlake_find_best_PLL,
 };
 
-static const intel_limit_t intel_limits_igdng_lvds = {
-	.dot = { .min = IGDNG_DOT_MIN,          .max = IGDNG_DOT_MAX },
-	.vco = { .min = IGDNG_VCO_MIN,          .max = IGDNG_VCO_MAX },
-	.n   = { .min = IGDNG_N_MIN,            .max = IGDNG_N_MAX },
-	.m   = { .min = IGDNG_M_MIN,            .max = IGDNG_M_MAX },
-	.m1  = { .min = IGDNG_M1_MIN,           .max = IGDNG_M1_MAX },
-	.m2  = { .min = IGDNG_M2_MIN,           .max = IGDNG_M2_MAX },
-	.p   = { .min = IGDNG_P_LVDS_MIN,       .max = IGDNG_P_LVDS_MAX },
-	.p1  = { .min = IGDNG_P1_MIN,           .max = IGDNG_P1_MAX },
-	.p2  = { .dot_limit = IGDNG_P2_DOT_LIMIT,
-		 .p2_slow = IGDNG_P2_LVDS_SLOW,
-		 .p2_fast = IGDNG_P2_LVDS_FAST },
-	.find_pll = intel_igdng_find_best_PLL,
+static const intel_limit_t intel_limits_ironlake_lvds = {
+	.dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
+	.vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
+	.n   = { .min = IRONLAKE_N_MIN,            .max = IRONLAKE_N_MAX },
+	.m   = { .min = IRONLAKE_M_MIN,            .max = IRONLAKE_M_MAX },
+	.m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
+	.m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
+	.p   = { .min = IRONLAKE_P_LVDS_MIN,       .max = IRONLAKE_P_LVDS_MAX },
+	.p1  = { .min = IRONLAKE_P1_MIN,           .max = IRONLAKE_P1_MAX },
+	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+		 .p2_slow = IRONLAKE_P2_LVDS_SLOW,
+		 .p2_fast = IRONLAKE_P2_LVDS_FAST },
+	.find_pll = intel_ironlake_find_best_PLL,
 };
 
-static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc)
+static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
 {
 	const intel_limit_t *limit;
 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
-		limit = &intel_limits_igdng_lvds;
+		limit = &intel_limits_ironlake_lvds;
 	else
-		limit = &intel_limits_igdng_sdvo;
+		limit = &intel_limits_ironlake_sdvo;
 
 	return limit;
 }
@@ -557,20 +557,20 @@
 	struct drm_device *dev = crtc->dev;
 	const intel_limit_t *limit;
 
-	if (IS_IGDNG(dev))
-		limit = intel_igdng_limit(crtc);
+	if (IS_IRONLAKE(dev))
+		limit = intel_ironlake_limit(crtc);
 	else if (IS_G4X(dev)) {
 		limit = intel_g4x_limit(crtc);
-	} else if (IS_I9XX(dev) && !IS_IGD(dev)) {
+	} else if (IS_I9XX(dev) && !IS_PINEVIEW(dev)) {
 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
 			limit = &intel_limits_i9xx_lvds;
 		else
 			limit = &intel_limits_i9xx_sdvo;
-	} else if (IS_IGD(dev)) {
+	} else if (IS_PINEVIEW(dev)) {
 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
-			limit = &intel_limits_igd_lvds;
+			limit = &intel_limits_pineview_lvds;
 		else
-			limit = &intel_limits_igd_sdvo;
+			limit = &intel_limits_pineview_sdvo;
 	} else {
 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
 			limit = &intel_limits_i8xx_lvds;
@@ -580,8 +580,8 @@
 	return limit;
 }
 
-/* m1 is reserved as 0 in IGD, n is a ring counter */
-static void igd_clock(int refclk, intel_clock_t *clock)
+/* m1 is reserved as 0 in Pineview, n is a ring counter */
+static void pineview_clock(int refclk, intel_clock_t *clock)
 {
 	clock->m = clock->m2 + 2;
 	clock->p = clock->p1 * clock->p2;
@@ -591,8 +591,8 @@
 
 static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
 {
-	if (IS_IGD(dev)) {
-		igd_clock(refclk, clock);
+	if (IS_PINEVIEW(dev)) {
+		pineview_clock(refclk, clock);
 		return;
 	}
 	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
@@ -657,7 +657,7 @@
 		INTELPllInvalid ("m2 out of range\n");
 	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
 		INTELPllInvalid ("m1 out of range\n");
-	if (clock->m1 <= clock->m2 && !IS_IGD(dev))
+	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
 		INTELPllInvalid ("m1 <= m2\n");
 	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
 		INTELPllInvalid ("m out of range\n");
@@ -706,16 +706,17 @@
 
 	memset (best_clock, 0, sizeof (*best_clock));
 
-	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
-		for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
-		     clock.m1++) {
-			for (clock.m2 = limit->m2.min;
-			     clock.m2 <= limit->m2.max; clock.m2++) {
-				/* m1 is always 0 in IGD */
-				if (clock.m2 >= clock.m1 && !IS_IGD(dev))
-					break;
-				for (clock.n = limit->n.min;
-				     clock.n <= limit->n.max; clock.n++) {
+	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+	     clock.m1++) {
+		for (clock.m2 = limit->m2.min;
+		     clock.m2 <= limit->m2.max; clock.m2++) {
+			/* m1 is always 0 in Pineview */
+			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
+				break;
+			for (clock.n = limit->n.min;
+			     clock.n <= limit->n.max; clock.n++) {
+				for (clock.p1 = limit->p1.min;
+					clock.p1 <= limit->p1.max; clock.p1++) {
 					int this_err;
 
 					intel_clock(dev, refclk, &clock);
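
For reference, the per-candidate evaluation inside these loops: intel_clock() derives m and p from the divisors, then the VCO and dot clock. A standalone sketch of the i9xx formulas (the m/p lines mirror the code above; the vco/dot expressions are assumed from the driver's intel_clock()):

struct pll { int n, m1, m2, p1, p2, m, p, vco, dot; };

static void i9xx_clock(int refclk, struct pll *c)
{
	c->m = 5 * (c->m1 + 2) + (c->m2 + 2);
	c->p = c->p1 * c->p2;
	c->vco = refclk * c->m / (c->n + 2);	/* assumed, per intel_clock() */
	c->dot = c->vco / c->p;			/* candidate pixel clock */
}

The search then keeps the candidate whose dot clock is closest to the target among those passing the limit checks.
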
@@ -751,8 +752,8 @@
 
 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
 		for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
-			/* m1 is always 0 in IGD */
-			if (clock.m2 >= clock.m1 && !IS_IGD(dev))
+			/* m1 is always 0 in Pineview */
+			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
 				break;
 			for (clock.n = limit->n.min; clock.n <= limit->n.max;
 			     clock.n++) {
@@ -833,8 +834,8 @@
 }
 
 static bool
-intel_find_pll_igdng_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
-		      int target, int refclk, intel_clock_t *best_clock)
+intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
+			   int target, int refclk, intel_clock_t *best_clock)
 {
 	struct drm_device *dev = crtc->dev;
 	intel_clock_t clock;
@@ -857,8 +858,8 @@
 }
 
 static bool
-intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-			int target, int refclk, intel_clock_t *best_clock)
+intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+			     int target, int refclk, intel_clock_t *best_clock)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -871,7 +872,7 @@
 		return true;
 
 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
-		return intel_find_pll_igdng_dp(limit, crtc, target,
+		return intel_find_pll_ironlake_dp(limit, crtc, target,
 					       refclk, best_clock);
 
 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
@@ -949,7 +950,7 @@
 intel_wait_for_vblank(struct drm_device *dev)
 {
 	/* Wait for 20ms, i.e. one cycle at 50 Hz. */
-	mdelay(20);
+	msleep(20);
 }
 
 /* Parameters have changed, update FBC info */
@@ -994,7 +995,7 @@
 		fbc_ctl |= dev_priv->cfb_fence;
 	I915_WRITE(FBC_CONTROL, fbc_ctl);
 
-	DRM_DEBUG("enabled FBC, pitch %ld, yoff %d, plane %d, ",
+	DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
 		  dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
 }
 
@@ -1017,7 +1018,7 @@
 
 	intel_wait_for_vblank(dev);
 
-	DRM_DEBUG("disabled FBC\n");
+	DRM_DEBUG_KMS("disabled FBC\n");
 }
 
 static bool i8xx_fbc_enabled(struct drm_crtc *crtc)
@@ -1062,7 +1063,7 @@
 	/* enable it... */
 	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
 
-	DRM_DEBUG("enabled fbc on plane %d\n", intel_crtc->plane);
+	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
 }
 
 void g4x_disable_fbc(struct drm_device *dev)
@@ -1076,7 +1077,7 @@
 	I915_WRITE(DPFC_CONTROL, dpfc_ctl);
 	intel_wait_for_vblank(dev);
 
-	DRM_DEBUG("disabled FBC\n");
+	DRM_DEBUG_KMS("disabled FBC\n");
 }
 
 static bool g4x_fbc_enabled(struct drm_crtc *crtc)
@@ -1141,25 +1142,27 @@
 	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
 	 */
 	if (intel_fb->obj->size > dev_priv->cfb_size) {
-		DRM_DEBUG("framebuffer too large, disabling compression\n");
+		DRM_DEBUG_KMS("framebuffer too large, disabling "
+				"compression\n");
 		goto out_disable;
 	}
 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
 	    (mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
-		DRM_DEBUG("mode incompatible with compression, disabling\n");
+		DRM_DEBUG_KMS("mode incompatible with compression, "
+				"disabling\n");
 		goto out_disable;
 	}
 	if ((mode->hdisplay > 2048) ||
 	    (mode->vdisplay > 1536)) {
-		DRM_DEBUG("mode too large for compression, disabling\n");
+		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
 		goto out_disable;
 	}
 	if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
-		DRM_DEBUG("plane not 0, disabling compression\n");
+		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
 		goto out_disable;
 	}
 	if (obj_priv->tiling_mode != I915_TILING_X) {
-		DRM_DEBUG("framebuffer not tiled, disabling compression\n");
+		DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
 		goto out_disable;
 	}
 
@@ -1181,13 +1184,57 @@
 	return;
 
 out_disable:
-	DRM_DEBUG("unsupported config, disabling FBC\n");
+	DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
 	/* Multiple disables should be harmless */
 	if (dev_priv->display.fbc_enabled(crtc))
 		dev_priv->display.disable_fbc(dev);
 }
 
 static int
+intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	u32 alignment;
+	int ret;
+
+	switch (obj_priv->tiling_mode) {
+	case I915_TILING_NONE:
+		alignment = 64 * 1024;
+		break;
+	case I915_TILING_X:
+		/* pin() will align the object as required by fence */
+		alignment = 0;
+		break;
+	case I915_TILING_Y:
+		/* FIXME: Is this true? */
+		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
+		return -EINVAL;
+	default:
+		BUG();
+	}
+
+	ret = i915_gem_object_pin(obj, alignment);
+	if (ret != 0)
+		return ret;
+
+	/* Install a fence for tiled scan-out. Pre-i965 always needs a
+	 * fence, whereas 965+ only requires a fence if using
+	 * framebuffer compression.  For simplicity, we always install
+	 * a fence as the cost is not that onerous.
+	 */
+	if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
+	    obj_priv->tiling_mode != I915_TILING_NONE) {
+		ret = i915_gem_object_get_fence_reg(obj);
+		if (ret != 0) {
+			i915_gem_object_unpin(obj);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		    struct drm_framebuffer *old_fb)
 {
@@ -1206,12 +1253,12 @@
 	int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
 	int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
 	int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
-	u32 dspcntr, alignment;
+	u32 dspcntr;
 	int ret;
 
 	/* no fb bound */
 	if (!crtc->fb) {
-		DRM_DEBUG("No FB bound\n");
+		DRM_DEBUG_KMS("No FB bound\n");
 		return 0;
 	}
 
@@ -1228,24 +1275,8 @@
 	obj = intel_fb->obj;
 	obj_priv = obj->driver_private;
 
-	switch (obj_priv->tiling_mode) {
-	case I915_TILING_NONE:
-		alignment = 64 * 1024;
-		break;
-	case I915_TILING_X:
-		/* pin() will align the object as required by fence */
-		alignment = 0;
-		break;
-	case I915_TILING_Y:
-		/* FIXME: Is this true? */
-		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
-		return -EINVAL;
-	default:
-		BUG();
-	}
-
 	mutex_lock(&dev->struct_mutex);
-	ret = i915_gem_object_pin(obj, alignment);
+	ret = intel_pin_and_fence_fb_obj(dev, obj);
 	if (ret != 0) {
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
@@ -1258,20 +1289,6 @@
 		return ret;
 	}
 
-	/* Install a fence for tiled scan-out. Pre-i965 always needs a fence,
-	 * whereas 965+ only requires a fence if using framebuffer compression.
-	 * For simplicity, we always install a fence as the cost is not that onerous.
-	 */
-	if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
-	    obj_priv->tiling_mode != I915_TILING_NONE) {
-		ret = i915_gem_object_get_fence_reg(obj);
-		if (ret != 0) {
-			i915_gem_object_unpin(obj);
-			mutex_unlock(&dev->struct_mutex);
-			return ret;
-		}
-	}
-
 	dspcntr = I915_READ(dspcntr_reg);
 	/* Mask out pixel format bits in case we change it */
 	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
@@ -1287,7 +1304,10 @@
 		break;
 	case 24:
 	case 32:
-		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+		if (crtc->fb->depth == 30)
+			dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
+		else
+			dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
 		break;
 	default:
 		DRM_ERROR("Unknown color depth\n");
@@ -1302,7 +1322,7 @@
 			dspcntr &= ~DISPPLANE_TILED;
 	}
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		/* must disable */
 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
@@ -1311,7 +1331,7 @@
 	Start = obj_priv->gtt_offset;
 	Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
 
-	DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
+	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
 	I915_WRITE(dspstride, crtc->fb->pitch);
 	if (IS_I965G(dev)) {
 		I915_WRITE(dspbase, Offset);
@@ -1363,7 +1383,7 @@
 	u8 sr1;
 	u32 vga_reg;
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		vga_reg = CPU_VGACNTRL;
 	else
 		vga_reg = VGACNTRL;
@@ -1379,19 +1399,19 @@
 	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
 }
 
-static void igdng_disable_pll_edp (struct drm_crtc *crtc)
+static void ironlake_disable_pll_edp (struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 dpa_ctl;
 
-	DRM_DEBUG("\n");
+	DRM_DEBUG_KMS("\n");
 	dpa_ctl = I915_READ(DP_A);
 	dpa_ctl &= ~DP_PLL_ENABLE;
 	I915_WRITE(DP_A, dpa_ctl);
 }
 
-static void igdng_enable_pll_edp (struct drm_crtc *crtc)
+static void ironlake_enable_pll_edp (struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1404,13 +1424,13 @@
 }
 
 
-static void igdng_set_pll_edp (struct drm_crtc *crtc, int clock)
+static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 dpa_ctl;
 
-	DRM_DEBUG("eDP PLL enable for clock %d\n", clock);
+	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
 	dpa_ctl = I915_READ(DP_A);
 	dpa_ctl &= ~DP_PLL_FREQ_MASK;
 
@@ -1440,7 +1460,7 @@
 	udelay(500);
 }
 
-static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1481,10 +1501,19 @@
 	case DRM_MODE_DPMS_ON:
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
-		DRM_DEBUG("crtc %d dpms on\n", pipe);
+		DRM_DEBUG_KMS("crtc %d dpms on\n", pipe);
+
+		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+			temp = I915_READ(PCH_LVDS);
+			if ((temp & LVDS_PORT_EN) == 0) {
+				I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
+				POSTING_READ(PCH_LVDS);
+			}
+		}
+
 		if (HAS_eDP) {
 			/* enable eDP PLL */
-			igdng_enable_pll_edp(crtc);
+			ironlake_enable_pll_edp(crtc);
 		} else {
 			/* enable PCH DPLL */
 			temp = I915_READ(pch_dpll_reg);
@@ -1501,7 +1530,7 @@
 			I915_READ(fdi_rx_reg);
 			udelay(200);
 
-			/* Enable CPU FDI TX PLL, always on for IGDNG */
+			/* Enable CPU FDI TX PLL, always on for Ironlake */
 			temp = I915_READ(fdi_tx_reg);
 			if ((temp & FDI_TX_PLL_ENABLE) == 0) {
 				I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
@@ -1568,12 +1597,13 @@
 			udelay(150);
 
 			temp = I915_READ(fdi_rx_iir_reg);
-			DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
+			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
 
 			if ((temp & FDI_RX_BIT_LOCK) == 0) {
 				for (j = 0; j < tries; j++) {
 					temp = I915_READ(fdi_rx_iir_reg);
-					DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
+					DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
+								temp);
 					if (temp & FDI_RX_BIT_LOCK)
 						break;
 					udelay(200);
@@ -1582,11 +1612,11 @@
 					I915_WRITE(fdi_rx_iir_reg,
 							temp | FDI_RX_BIT_LOCK);
 				else
-					DRM_DEBUG("train 1 fail\n");
+					DRM_DEBUG_KMS("train 1 fail\n");
 			} else {
 				I915_WRITE(fdi_rx_iir_reg,
 						temp | FDI_RX_BIT_LOCK);
-				DRM_DEBUG("train 1 ok 2!\n");
+				DRM_DEBUG_KMS("train 1 ok 2!\n");
 			}
 			temp = I915_READ(fdi_tx_reg);
 			temp &= ~FDI_LINK_TRAIN_NONE;
@@ -1601,12 +1631,13 @@
 			udelay(150);
 
 			temp = I915_READ(fdi_rx_iir_reg);
-			DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
+			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
 
 			if ((temp & FDI_RX_SYMBOL_LOCK) == 0) {
 				for (j = 0; j < tries; j++) {
 					temp = I915_READ(fdi_rx_iir_reg);
-					DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
+					DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
+								temp);
 					if (temp & FDI_RX_SYMBOL_LOCK)
 						break;
 					udelay(200);
@@ -1614,15 +1645,15 @@
 				if (j != tries) {
 					I915_WRITE(fdi_rx_iir_reg,
 							temp | FDI_RX_SYMBOL_LOCK);
-					DRM_DEBUG("train 2 ok 1!\n");
+					DRM_DEBUG_KMS("train 2 ok 1!\n");
 				} else
-					DRM_DEBUG("train 2 fail\n");
+					DRM_DEBUG_KMS("train 2 fail\n");
 			} else {
 				I915_WRITE(fdi_rx_iir_reg,
 						temp | FDI_RX_SYMBOL_LOCK);
-				DRM_DEBUG("train 2 ok 2!\n");
+				DRM_DEBUG_KMS("train 2 ok 2!\n");
 			}
-			DRM_DEBUG("train done\n");
+			DRM_DEBUG_KMS("train done\n");
 
 			/* set transcoder timing */
 			I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
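
The two training phases above share the same poll-with-retries shape. As a
sketch (a hypothetical helper, not part of this patch), the pattern reduces
to:

	/* Hypothetical helper illustrating the FDI training wait used
	 * above: poll an IIR register until the lock bit appears or the
	 * retry budget runs out. */
	static bool fdi_wait_for_lock(struct drm_i915_private *dev_priv,
				      u32 iir_reg, u32 lock_bit, int tries)
	{
		int j;

		for (j = 0; j < tries; j++) {
			if (I915_READ(iir_reg) & lock_bit)
				return true;
			udelay(200);
		}
		return false;
	}
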
@@ -1664,9 +1695,7 @@
 
 	break;
 	case DRM_MODE_DPMS_OFF:
-		DRM_DEBUG("crtc %d dpms off\n", pipe);
-
-		i915_disable_vga(dev);
+		DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
 
 		/* Disable display plane */
 		temp = I915_READ(dspcntr_reg);
@@ -1677,6 +1706,8 @@
 			I915_READ(dspbase_reg);
 		}
 
+		i915_disable_vga(dev);
+
 		/* disable cpu pipe, disable after all planes disabled */
 		temp = I915_READ(pipeconf_reg);
 		if ((temp & PIPEACONF_ENABLE) != 0) {
@@ -1690,16 +1721,23 @@
 					udelay(500);
 					continue;
 				} else {
-					DRM_DEBUG("pipe %d off delay\n", pipe);
+					DRM_DEBUG_KMS("pipe %d off delay\n",
+								pipe);
 					break;
 				}
 			}
 		} else
-			DRM_DEBUG("crtc %d is disabled\n", pipe);
+			DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
 
-		if (HAS_eDP) {
-			igdng_disable_pll_edp(crtc);
+		udelay(100);
+
+		/* Disable PF */
+		temp = I915_READ(pf_ctl_reg);
+		if ((temp & PF_ENABLE) != 0) {
+			I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
+			I915_READ(pf_ctl_reg);
 		}
+		I915_WRITE(pf_win_size, 0);
 
 		/* disable CPU FDI tx and PCH FDI rx */
 		temp = I915_READ(fdi_tx_reg);
@@ -1725,6 +1763,13 @@
 
 		udelay(100);
 
+		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+			temp = I915_READ(PCH_LVDS);
+			I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN);
+			I915_READ(PCH_LVDS);
+			udelay(100);
+		}
+
 		/* disable PCH transcoder */
 		temp = I915_READ(transconf_reg);
 		if ((temp & TRANS_ENABLE) != 0) {
@@ -1738,12 +1783,15 @@
 					udelay(500);
 					continue;
 				} else {
-					DRM_DEBUG("transcoder %d off delay\n", pipe);
+					DRM_DEBUG_KMS("transcoder %d off "
+							"delay\n", pipe);
 					break;
 				}
 			}
 		}
 
+		udelay(100);
+
 		/* disable PCH DPLL */
 		temp = I915_READ(pch_dpll_reg);
 		if ((temp & DPLL_VCO_ENABLE) != 0) {
@@ -1751,14 +1799,20 @@
 			I915_READ(pch_dpll_reg);
 		}
 
-		temp = I915_READ(fdi_rx_reg);
-		if ((temp & FDI_RX_PLL_ENABLE) != 0) {
-			temp &= ~FDI_SEL_PCDCLK;
-			temp &= ~FDI_RX_PLL_ENABLE;
-			I915_WRITE(fdi_rx_reg, temp);
-			I915_READ(fdi_rx_reg);
+		if (HAS_eDP) {
+			ironlake_disable_pll_edp(crtc);
 		}
 
+		temp = I915_READ(fdi_rx_reg);
+		temp &= ~FDI_SEL_PCDCLK;
+		I915_WRITE(fdi_rx_reg, temp);
+		I915_READ(fdi_rx_reg);
+
+		temp = I915_READ(fdi_rx_reg);
+		temp &= ~FDI_RX_PLL_ENABLE;
+		I915_WRITE(fdi_rx_reg, temp);
+		I915_READ(fdi_rx_reg);
+
 		/* Disable CPU FDI TX PLL */
 		temp = I915_READ(fdi_tx_reg);
 		if ((temp & FDI_TX_PLL_ENABLE) != 0) {
@@ -1767,20 +1821,43 @@
 			udelay(100);
 		}
 
-		/* Disable PF */
-		temp = I915_READ(pf_ctl_reg);
-		if ((temp & PF_ENABLE) != 0) {
-			I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
-			I915_READ(pf_ctl_reg);
-		}
-		I915_WRITE(pf_win_size, 0);
-
 		/* Wait for the clocks to turn off. */
-		udelay(150);
+		udelay(100);
 		break;
 	}
 }
 
+static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
+{
+	struct intel_overlay *overlay;
+	int ret;
+
+	if (!enable && intel_crtc->overlay) {
+		overlay = intel_crtc->overlay;
+		mutex_lock(&overlay->dev->struct_mutex);
+		for (;;) {
+			ret = intel_overlay_switch_off(overlay);
+			if (ret == 0)
+				break;
+
+			ret = intel_overlay_recover_from_interrupt(overlay, 0);
+			if (ret != 0) {
+				/* overlay doesn't react anymore. Usually
+				 * results in a black screen and an unkillable
+				 * X server. */
+				BUG();
+				overlay->hw_wedged = HW_WEDGED;
+				break;
+			}
+		}
+		mutex_unlock(&overlay->dev->struct_mutex);
+	}
+	/* Let userspace switch the overlay on again. In most cases userspace
+	 * has to recompute where to put it anyway. */
+
+	return;
+}
+
 static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
 	struct drm_device *dev = crtc->dev;
@@ -1839,12 +1916,14 @@
 			intel_update_fbc(crtc, &crtc->mode);
 
 		/* Give the overlay scaler a chance to enable if it's on this pipe */
-		//intel_crtc_dpms_video(crtc, true); TODO
+		intel_crtc_dpms_overlay(intel_crtc, true);
 	break;
 	case DRM_MODE_DPMS_OFF:
 		intel_update_watermarks(dev);
+
 		/* Give the overlay scaler a chance to disable if it's on this pipe */
-		//intel_crtc_dpms_video(crtc, FALSE); TODO
+		intel_crtc_dpms_overlay(intel_crtc, false);
+		drm_vblank_off(dev, pipe);
 
 		if (dev_priv->cfb_plane == plane &&
 		    dev_priv->display.disable_fbc)
@@ -1963,7 +2042,7 @@
 				  struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = crtc->dev;
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		/* FDI link clock is fixed at 2.7G */
 		if (mode->clock * 3 > 27000 * 4)
 			return MODE_CLOCK_HIGH;
@@ -2039,7 +2118,7 @@
  * Return the pipe currently connected to the panel fitter,
  * or -1 if the panel fitter is not present or not in use
  */
-static int intel_panel_fitter_pipe (struct drm_device *dev)
+int intel_panel_fitter_pipe (struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32  pfit_control;
@@ -2083,9 +2162,8 @@
 #define LINK_N 0x80000
 
 static void
-igdng_compute_m_n(int bits_per_pixel, int nlanes,
-		int pixel_clock, int link_clock,
-		struct fdi_m_n *m_n)
+ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
+		     int link_clock, struct fdi_m_n *m_n)
 {
 	u64 temp;
 
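
For intuition, the M/N pair computed here encodes the ratio of pixel
payload bandwidth to raw FDI link bandwidth. A simplified sketch of that
relationship (the real function also reduces the fraction and packs it
into registers):

	/* Simplified sketch only: data M/N compares payload bandwidth
	 * (bits_per_pixel * pixel_clock) against link bandwidth
	 * (link_clock * nlanes * 8 bits per clock). */
	static void sketch_compute_m_n(int bits_per_pixel, int nlanes,
				       int pixel_clock, int link_clock,
				       u64 *data_m, u64 *data_n)
	{
		*data_m = (u64)bits_per_pixel * pixel_clock;
		*data_n = (u64)link_clock * nlanes * 8;
	}
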
@@ -2113,34 +2191,34 @@
 	unsigned long cacheline_size;
 };
 
-/* IGD has different values for various configs */
-static struct intel_watermark_params igd_display_wm = {
-	IGD_DISPLAY_FIFO,
-	IGD_MAX_WM,
-	IGD_DFT_WM,
-	IGD_GUARD_WM,
-	IGD_FIFO_LINE_SIZE
+/* Pineview has different values for various configs */
+static struct intel_watermark_params pineview_display_wm = {
+	PINEVIEW_DISPLAY_FIFO,
+	PINEVIEW_MAX_WM,
+	PINEVIEW_DFT_WM,
+	PINEVIEW_GUARD_WM,
+	PINEVIEW_FIFO_LINE_SIZE
 };
-static struct intel_watermark_params igd_display_hplloff_wm = {
-	IGD_DISPLAY_FIFO,
-	IGD_MAX_WM,
-	IGD_DFT_HPLLOFF_WM,
-	IGD_GUARD_WM,
-	IGD_FIFO_LINE_SIZE
+static struct intel_watermark_params pineview_display_hplloff_wm = {
+	PINEVIEW_DISPLAY_FIFO,
+	PINEVIEW_MAX_WM,
+	PINEVIEW_DFT_HPLLOFF_WM,
+	PINEVIEW_GUARD_WM,
+	PINEVIEW_FIFO_LINE_SIZE
 };
-static struct intel_watermark_params igd_cursor_wm = {
-	IGD_CURSOR_FIFO,
-	IGD_CURSOR_MAX_WM,
-	IGD_CURSOR_DFT_WM,
-	IGD_CURSOR_GUARD_WM,
-	IGD_FIFO_LINE_SIZE,
+static struct intel_watermark_params pineview_cursor_wm = {
+	PINEVIEW_CURSOR_FIFO,
+	PINEVIEW_CURSOR_MAX_WM,
+	PINEVIEW_CURSOR_DFT_WM,
+	PINEVIEW_CURSOR_GUARD_WM,
+	PINEVIEW_FIFO_LINE_SIZE,
 };
-static struct intel_watermark_params igd_cursor_hplloff_wm = {
-	IGD_CURSOR_FIFO,
-	IGD_CURSOR_MAX_WM,
-	IGD_CURSOR_DFT_WM,
-	IGD_CURSOR_GUARD_WM,
-	IGD_FIFO_LINE_SIZE
+static struct intel_watermark_params pineview_cursor_hplloff_wm = {
+	PINEVIEW_CURSOR_FIFO,
+	PINEVIEW_CURSOR_MAX_WM,
+	PINEVIEW_CURSOR_DFT_WM,
+	PINEVIEW_CURSOR_GUARD_WM,
+	PINEVIEW_FIFO_LINE_SIZE
 };
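
The watermark tables above use positional initializers. For readability,
the same data rewritten with designated initializers (a sketch; field
names assumed from the intel_watermark_params definition, values
unchanged):

	static struct intel_watermark_params pineview_display_wm = {
		.fifo_size	= PINEVIEW_DISPLAY_FIFO,
		.max_wm		= PINEVIEW_MAX_WM,
		.default_wm	= PINEVIEW_DFT_WM,
		.guard_size	= PINEVIEW_GUARD_WM,
		.cacheline_size	= PINEVIEW_FIFO_LINE_SIZE,
	};
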
 static struct intel_watermark_params g4x_wm_info = {
 	G4X_FIFO_SIZE,
@@ -2213,11 +2291,11 @@
 		1000;
 	entries_required /= wm->cacheline_size;
 
-	DRM_DEBUG("FIFO entries required for mode: %d\n", entries_required);
+	DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required);
 
 	wm_size = wm->fifo_size - (entries_required + wm->guard_size);
 
-	DRM_DEBUG("FIFO watermark level: %d\n", wm_size);
+	DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
 
 	/* Don't promote wm_size to unsigned... */
 	if (wm_size > (long)wm->max_wm)
@@ -2279,50 +2357,50 @@
 			return latency;
 	}
 
-	DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n");
+	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
 
 	return NULL;
 }
 
-static void igd_disable_cxsr(struct drm_device *dev)
+static void pineview_disable_cxsr(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 reg;
 
 	/* deactivate cxsr */
 	reg = I915_READ(DSPFW3);
-	reg &= ~(IGD_SELF_REFRESH_EN);
+	reg &= ~(PINEVIEW_SELF_REFRESH_EN);
 	I915_WRITE(DSPFW3, reg);
 	DRM_INFO("Big FIFO is disabled\n");
 }
 
-static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
-			    int pixel_size)
+static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
+				 int pixel_size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 reg;
 	unsigned long wm;
 	struct cxsr_latency *latency;
 
-	latency = intel_get_cxsr_latency(IS_IGDG(dev), dev_priv->fsb_freq,
+	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
 		dev_priv->mem_freq);
 	if (!latency) {
-		DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n");
-		igd_disable_cxsr(dev);
+		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+		pineview_disable_cxsr(dev);
 		return;
 	}
 
 	/* Display SR */
-	wm = intel_calculate_wm(clock, &igd_display_wm, pixel_size,
+	wm = intel_calculate_wm(clock, &pineview_display_wm, pixel_size,
 				latency->display_sr);
 	reg = I915_READ(DSPFW1);
 	reg &= 0x7fffff;
 	reg |= wm << 23;
 	I915_WRITE(DSPFW1, reg);
-	DRM_DEBUG("DSPFW1 register is %x\n", reg);
+	DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
 
 	/* cursor SR */
-	wm = intel_calculate_wm(clock, &igd_cursor_wm, pixel_size,
+	wm = intel_calculate_wm(clock, &pineview_cursor_wm, pixel_size,
 				latency->cursor_sr);
 	reg = I915_READ(DSPFW3);
 	reg &= ~(0x3f << 24);
@@ -2330,7 +2408,7 @@
 	I915_WRITE(DSPFW3, reg);
 
 	/* Display HPLL off SR */
-	wm = intel_calculate_wm(clock, &igd_display_hplloff_wm,
+	wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
 		pixel_size, latency->display_hpll_disable);
 	reg = I915_READ(DSPFW3);
 	reg &= 0xfffffe00;
@@ -2338,17 +2416,17 @@
 	I915_WRITE(DSPFW3, reg);
 
 	/* cursor HPLL off SR */
-	wm = intel_calculate_wm(clock, &igd_cursor_hplloff_wm, pixel_size,
+	wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pixel_size,
 				latency->cursor_hpll_disable);
 	reg = I915_READ(DSPFW3);
 	reg &= ~(0x3f << 16);
 	reg |= (wm & 0x3f) << 16;
 	I915_WRITE(DSPFW3, reg);
-	DRM_DEBUG("DSPFW3 register is %x\n", reg);
+	DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
 
 	/* activate cxsr */
 	reg = I915_READ(DSPFW3);
-	reg |= IGD_SELF_REFRESH_EN;
+	reg |= PINEVIEW_SELF_REFRESH_EN;
 	I915_WRITE(DSPFW3, reg);
 
 	DRM_INFO("Big FIFO is enabled\n");
@@ -2384,8 +2462,8 @@
 		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
 			(dsparb & 0x7f);
 
-	DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
-		  size);
+	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+			plane ? "B" : "A", size);
 
 	return size;
 }
@@ -2403,8 +2481,8 @@
 			(dsparb & 0x1ff);
 	size >>= 1; /* Convert to cachelines */
 
-	DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
-		  size);
+	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+			plane ? "B" : "A", size);
 
 	return size;
 }
@@ -2418,7 +2496,8 @@
 	size = dsparb & 0x7f;
 	size >>= 2; /* Convert to cachelines */
 
-	DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
+	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+			plane ? "B" : "A",
 		  size);
 
 	return size;
@@ -2433,8 +2512,8 @@
 	size = dsparb & 0x7f;
 	size >>= 1; /* Convert to cachelines */
 
-	DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
-		  size);
+	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+			plane ? "B" : "A", size);
 
 	return size;
 }
@@ -2509,15 +2588,39 @@
 		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
-static void i965_update_wm(struct drm_device *dev, int unused, int unused2,
-			   int unused3, int unused4)
+static void i965_update_wm(struct drm_device *dev, int planea_clock,
+			   int planeb_clock, int sr_hdisplay, int pixel_size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long line_time_us;
+	int sr_clock, sr_entries, srwm = 1;
 
-	DRM_DEBUG("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR 8\n");
+	/* Calculate self-refresh entries for single-plane configs */
+	if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
+		/* self-refresh has much higher latency */
+		static const int sr_latency_ns = 12000;
+
+		sr_clock = planea_clock ? planea_clock : planeb_clock;
+		line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+
+		/* Use ns/us then divide to preserve precision */
+		sr_entries = (((sr_latency_ns / line_time_us) + 1) *
+			      pixel_size * sr_hdisplay) / 1000;
+		sr_entries = roundup(sr_entries / I915_FIFO_LINE_SIZE, 1);
+		DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
+		srwm = I945_FIFO_SIZE - sr_entries;
+		if (srwm < 0)
+			srwm = 1;
+		srwm &= 0x3f;
+		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+	}
+
+	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
+		      srwm);
 
 	/* 965 has limitations... */
-	I915_WRITE(DSPFW1, (8 << 16) | (8 << 8) | (8 << 0));
+	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) |
+		   (8 << 0));
 	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
 }
 
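
To make the self-refresh arithmetic above concrete, a worked example with
illustrative values (not measured from real hardware):

	/* With sr_hdisplay = 1024, sr_clock = 65000 kHz, pixel_size = 4
	 * and I915_FIFO_LINE_SIZE = 64, using integer division:
	 *
	 *   line_time_us = (1024 * 1000) / 65000                = 15
	 *   sr_entries   = ((12000 / 15) + 1) * 4 * 1024 / 1000 = 3280
	 *   in FIFO lines: 3280 / 64                            = 51
	 *   srwm         = I945_FIFO_SIZE (127) - 51            = 76
	 */
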
@@ -2553,7 +2656,7 @@
 				       pixel_size, latency_ns);
 	planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params,
 				       pixel_size, latency_ns);
-	DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
 
 	/*
 	 * Overlay gets an aggressive default since video jitter is bad.
@@ -2573,14 +2676,14 @@
 		sr_entries = (((sr_latency_ns / line_time_us) + 1) *
 			      pixel_size * sr_hdisplay) / 1000;
 		sr_entries = roundup(sr_entries / cacheline_size, 1);
-		DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
+		DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
 		srwm = total_size - sr_entries;
 		if (srwm < 0)
 			srwm = 1;
 		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
 	}
 
-	DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
+	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
 		  planea_wm, planeb_wm, cwm, srwm);
 
 	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
@@ -2607,7 +2710,7 @@
 				       pixel_size, latency_ns);
 	fwater_lo |= (3<<8) | planea_wm;
 
-	DRM_DEBUG("Setting FIFO watermarks - A: %d\n", planea_wm);
+	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
 
 	I915_WRITE(FW_BLC, fwater_lo);
 }
@@ -2661,11 +2764,11 @@
 		if (crtc->enabled) {
 			enabled++;
 			if (intel_crtc->plane == 0) {
-				DRM_DEBUG("plane A (pipe %d) clock: %d\n",
+				DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
 					  intel_crtc->pipe, crtc->mode.clock);
 				planea_clock = crtc->mode.clock;
 			} else {
-				DRM_DEBUG("plane B (pipe %d) clock: %d\n",
+				DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n",
 					  intel_crtc->pipe, crtc->mode.clock);
 				planeb_clock = crtc->mode.clock;
 			}
@@ -2682,10 +2785,10 @@
 		return;
 
 	/* Single plane configs can enable self refresh */
-	if (enabled == 1 && IS_IGD(dev))
-		igd_enable_cxsr(dev, sr_clock, pixel_size);
-	else if (IS_IGD(dev))
-		igd_disable_cxsr(dev);
+	if (enabled == 1 && IS_PINEVIEW(dev))
+		pineview_enable_cxsr(dev, sr_clock, pixel_size);
+	else if (IS_PINEVIEW(dev))
+		pineview_disable_cxsr(dev);
 
 	dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
 				    sr_hdisplay, pixel_size);
@@ -2779,10 +2882,11 @@
 
 	if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) {
 		refclk = dev_priv->lvds_ssc_freq * 1000;
-		DRM_DEBUG("using SSC reference clock of %d MHz\n", refclk / 1000);
+		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
+					refclk / 1000);
 	} else if (IS_I9XX(dev)) {
 		refclk = 96000;
-		if (IS_IGDNG(dev))
+		if (IS_IRONLAKE(dev))
 			refclk = 120000; /* 120Mhz refclk */
 	} else {
 		refclk = 48000;
@@ -2802,14 +2906,25 @@
 		return -EINVAL;
 	}
 
-	if (limit->find_reduced_pll && dev_priv->lvds_downclock_avail) {
+	if (is_lvds && limit->find_reduced_pll &&
+			dev_priv->lvds_downclock_avail) {
 		memcpy(&reduced_clock, &clock, sizeof(intel_clock_t));
 		has_reduced_clock = limit->find_reduced_pll(limit, crtc,
-							    (adjusted_mode->clock*3/4),
+							    dev_priv->lvds_downclock,
 							    refclk,
 							    &reduced_clock);
+		if (has_reduced_clock && (clock.p != reduced_clock.p)) {
+			/*
+			 * If the two clocks need different P dividers, we
+			 * cannot switch the display clock using FP0/FP1
+			 * alone, so disable the LVDS downclock feature in
+			 * that case.
+			 */
+			DRM_DEBUG_KMS("Different P is found for "
+						"LVDS clock/downclock\n");
+			has_reduced_clock = 0;
+		}
 	}
-
 	/* SDVO TV has fixed PLL values that depend on its clock range;
 	   this mirrors the VBIOS setting. */
 	if (is_sdvo && is_tv) {
@@ -2831,7 +2946,7 @@
 	}
 
 	/* FDI link */
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		int lane, link_bw, bpp;
 		/* eDP doesn't require FDI link, so just set DP M/N
 		   according to current link config */
@@ -2873,8 +2988,7 @@
 			bpp = 24;
 		}
 
-		igdng_compute_m_n(bpp, lane, target_clock,
-				  link_bw, &m_n);
+		ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
 	}
 
 	/* Ironlake: try to setup display ref clock before DPLL
@@ -2882,7 +2996,7 @@
 	 * PCH B stepping, previous chipset stepping should be
 	 * ignoring this setting.
 	 */
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		temp = I915_READ(PCH_DREF_CONTROL);
 		/* Always enable nonspread source */
 		temp &= ~DREF_NONSPREAD_SOURCE_MASK;
@@ -2917,7 +3031,7 @@
 		}
 	}
 
-	if (IS_IGD(dev)) {
+	if (IS_PINEVIEW(dev)) {
 		fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
 		if (has_reduced_clock)
 			fp2 = (1 << reduced_clock.n) << 16 |
@@ -2929,7 +3043,7 @@
 				reduced_clock.m2;
 	}
 
-	if (!IS_IGDNG(dev))
+	if (!IS_IRONLAKE(dev))
 		dpll = DPLL_VGA_MODE_DIS;
 
 	if (IS_I9XX(dev)) {
@@ -2942,19 +3056,19 @@
 			sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
 			if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
 				dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
-			else if (IS_IGDNG(dev))
+			else if (IS_IRONLAKE(dev))
 				dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
 		}
 		if (is_dp)
 			dpll |= DPLL_DVO_HIGH_SPEED;
 
 		/* compute bitmask from p1 value */
-		if (IS_IGD(dev))
-			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD;
+		if (IS_PINEVIEW(dev))
+			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
 		else {
 			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
 			/* also FPA1 */
-			if (IS_IGDNG(dev))
+			if (IS_IRONLAKE(dev))
 				dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
 			if (IS_G4X(dev) && has_reduced_clock)
 				dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
@@ -2973,7 +3087,7 @@
 			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
 			break;
 		}
-		if (IS_I965G(dev) && !IS_IGDNG(dev))
+		if (IS_I965G(dev) && !IS_IRONLAKE(dev))
 			dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
 	} else {
 		if (is_lvds) {
@@ -3005,9 +3119,9 @@
 	/* Set up the display plane register */
 	dspcntr = DISPPLANE_GAMMA_ENABLE;
 
-	/* IGDNG's plane is forced to pipe, bit 24 is to
+	/* Ironlake's plane is forced to pipe, bit 24 is to
 	   enable color space conversion */
-	if (!IS_IGDNG(dev)) {
+	if (!IS_IRONLAKE(dev)) {
 		if (pipe == 0)
 			dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
 		else
@@ -3034,20 +3148,20 @@
 
 
 	/* Disable the panel fitter if it was on our pipe */
-	if (!IS_IGDNG(dev) && intel_panel_fitter_pipe(dev) == pipe)
+	if (!IS_IRONLAKE(dev) && intel_panel_fitter_pipe(dev) == pipe)
 		I915_WRITE(PFIT_CONTROL, 0);
 
-	DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
+	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
 	drm_mode_debug_printmodeline(mode);
 
-	/* assign to IGDNG registers */
-	if (IS_IGDNG(dev)) {
+	/* assign to Ironlake registers */
+	if (IS_IRONLAKE(dev)) {
 		fp_reg = pch_fp_reg;
 		dpll_reg = pch_dpll_reg;
 	}
 
 	if (is_edp) {
-		igdng_disable_pll_edp(crtc);
+		ironlake_disable_pll_edp(crtc);
 	} else if ((dpll & DPLL_VCO_ENABLE)) {
 		I915_WRITE(fp_reg, fp);
 		I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
@@ -3062,7 +3176,7 @@
 	if (is_lvds) {
 		u32 lvds;
 
-		if (IS_IGDNG(dev))
+		if (IS_IRONLAKE(dev))
 			lvds_reg = PCH_LVDS;
 
 		lvds = I915_READ(lvds_reg);
@@ -3095,7 +3209,7 @@
 		/* Wait for the clocks to stabilize. */
 		udelay(150);
 
-		if (IS_I965G(dev) && !IS_IGDNG(dev)) {
+		if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
 			if (is_sdvo) {
 				sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
 				I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
@@ -3115,14 +3229,14 @@
 		I915_WRITE(fp_reg + 4, fp2);
 		intel_crtc->lowfreq_avail = true;
 		if (HAS_PIPE_CXSR(dev)) {
-			DRM_DEBUG("enabling CxSR downclocking\n");
+			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
 			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
 		}
 	} else {
 		I915_WRITE(fp_reg + 4, fp);
 		intel_crtc->lowfreq_avail = false;
 		if (HAS_PIPE_CXSR(dev)) {
-			DRM_DEBUG("disabling CxSR downclocking\n");
+			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
 			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
 		}
 	}
@@ -3142,21 +3256,21 @@
 	/* pipesrc and dspsize control the size that is scaled from, which should
 	 * always be the user's requested size.
 	 */
-	if (!IS_IGDNG(dev)) {
+	if (!IS_IRONLAKE(dev)) {
 		I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
 				(mode->hdisplay - 1));
 		I915_WRITE(dsppos_reg, 0);
 	}
 	I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
 		I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
 		I915_WRITE(link_m1_reg, m_n.link_m);
 		I915_WRITE(link_n1_reg, m_n.link_n);
 
 		if (is_edp) {
-			igdng_set_pll_edp(crtc, adjusted_mode->clock);
+			ironlake_set_pll_edp(crtc, adjusted_mode->clock);
 		} else {
 			/* enable FDI RX PLL too */
 			temp = I915_READ(fdi_rx_reg);
@@ -3170,7 +3284,7 @@
 
 	intel_wait_for_vblank(dev);
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		/* enable address swizzle for tiling buffer */
 		temp = I915_READ(DISP_ARB_CTL);
 		I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
@@ -3204,8 +3318,8 @@
 	if (!crtc->enabled)
 		return;
 
-	/* use legacy palette for IGDNG */
-	if (IS_IGDNG(dev))
+	/* use legacy palette for Ironlake */
+	if (IS_IRONLAKE(dev))
 		palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
 						   LGC_PALETTE_B;
 
@@ -3234,11 +3348,11 @@
 	size_t addr;
 	int ret;
 
-	DRM_DEBUG("\n");
+	DRM_DEBUG_KMS("\n");
 
 	/* if we want to turn off the cursor ignore width and height */
 	if (!handle) {
-		DRM_DEBUG("cursor off\n");
+		DRM_DEBUG_KMS("cursor off\n");
 		if (IS_MOBILE(dev) || IS_I9XX(dev)) {
 			temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
 			temp |= CURSOR_MODE_DISABLE;
@@ -3546,18 +3660,18 @@
 		fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
 
 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
-	if (IS_IGD(dev)) {
-		clock.n = ffs((fp & FP_N_IGD_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
-		clock.m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT;
+	if (IS_PINEVIEW(dev)) {
+		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
+		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
 	} else {
 		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
 		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
 	}
 
 	if (IS_I9XX(dev)) {
-		if (IS_IGD(dev))
-			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_IGD) >>
-				DPLL_FPA01_P1_POST_DIV_SHIFT_IGD);
+		if (IS_PINEVIEW(dev))
+			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
+				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
 		else
 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
 			       DPLL_FPA01_P1_POST_DIV_SHIFT);
@@ -3572,7 +3686,7 @@
 				7 : 14;
 			break;
 		default:
-			DRM_DEBUG("Unknown DPLL mode %08x in programmed "
+			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
 				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
 			return 0;
 		}
@@ -3658,7 +3772,7 @@
 	struct drm_device *dev = (struct drm_device *)arg;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	DRM_DEBUG("idle timer fired, downclocking\n");
+	DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
 
 	dev_priv->busy = false;
 
@@ -3669,11 +3783,11 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		return;
 
 	if (!dev_priv->render_reclock_avail) {
-		DRM_DEBUG("not reclocking render clock\n");
+		DRM_DEBUG_DRIVER("not reclocking render clock\n");
 		return;
 	}
 
@@ -3682,7 +3796,7 @@
 		pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock);
 	else if (IS_I85X(dev))
 		pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock);
-	DRM_DEBUG("increasing render clock frequency\n");
+	DRM_DEBUG_DRIVER("increasing render clock frequency\n");
 
 	/* Schedule downclock */
 	if (schedule)
@@ -3694,11 +3808,11 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		return;
 
 	if (!dev_priv->render_reclock_avail) {
-		DRM_DEBUG("not reclocking render clock\n");
+		DRM_DEBUG_DRIVER("not reclocking render clock\n");
 		return;
 	}
 
@@ -3758,7 +3872,7 @@
 
 		pci_write_config_word(dev->pdev, HPLLCC, hpllcc);
 	}
-	DRM_DEBUG("decreasing render clock frequency\n");
+	DRM_DEBUG_DRIVER("decreasing render clock frequency\n");
 }
 
 /* Note that no increase function is needed for this - increase_renderclock()
@@ -3766,7 +3880,7 @@
  */
 void intel_decrease_displayclock(struct drm_device *dev)
 {
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		return;
 
 	if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) ||
@@ -3792,7 +3906,7 @@
 	struct drm_crtc *crtc = &intel_crtc->base;
 	drm_i915_private_t *dev_priv = crtc->dev->dev_private;
 
-	DRM_DEBUG("idle timer fired, downclocking\n");
+	DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
 
 	intel_crtc->busy = false;
 
@@ -3808,14 +3922,14 @@
 	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
 	int dpll = I915_READ(dpll_reg);
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		return;
 
 	if (!dev_priv->lvds_downclock_avail)
 		return;
 
 	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
-		DRM_DEBUG("upclocking LVDS\n");
+		DRM_DEBUG_DRIVER("upclocking LVDS\n");
 
 		/* Unlock panel regs */
 		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
@@ -3826,7 +3940,7 @@
 		intel_wait_for_vblank(dev);
 		dpll = I915_READ(dpll_reg);
 		if (dpll & DISPLAY_RATE_SELECT_FPA1)
-			DRM_DEBUG("failed to upclock LVDS!\n");
+			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
 
 		/* ...and lock them again */
 		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
@@ -3847,7 +3961,7 @@
 	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
 	int dpll = I915_READ(dpll_reg);
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		return;
 
 	if (!dev_priv->lvds_downclock_avail)
@@ -3858,7 +3972,7 @@
 	 * the manual case.
 	 */
 	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
-		DRM_DEBUG("downclocking LVDS\n");
+		DRM_DEBUG_DRIVER("downclocking LVDS\n");
 
 		/* Unlock panel regs */
 		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
@@ -3869,7 +3983,7 @@
 		intel_wait_for_vblank(dev);
 		dpll = I915_READ(dpll_reg);
 		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
-			DRM_DEBUG("failed to downclock LVDS!\n");
+			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
 
 		/* ...and lock them again */
 		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
@@ -3936,8 +4050,13 @@
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	dev_priv->busy = true;
-	intel_increase_renderclock(dev, true);
+	if (!dev_priv->busy) {
+		dev_priv->busy = true;
+		intel_increase_renderclock(dev, true);
+	} else {
+		mod_timer(&dev_priv->idle_timer, jiffies +
+			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
+	}
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		if (!crtc->fb)
@@ -3967,6 +4086,158 @@
 	kfree(intel_crtc);
 }
 
+struct intel_unpin_work {
+	struct work_struct work;
+	struct drm_device *dev;
+	struct drm_gem_object *obj;
+	struct drm_pending_vblank_event *event;
+	int pending;
+};
+
+static void intel_unpin_work_fn(struct work_struct *__work)
+{
+	struct intel_unpin_work *work =
+		container_of(__work, struct intel_unpin_work, work);
+
+	mutex_lock(&work->dev->struct_mutex);
+	i915_gem_object_unpin(work->obj);
+	drm_gem_object_unreference(work->obj);
+	mutex_unlock(&work->dev->struct_mutex);
+	kfree(work);
+}
+
+void intel_finish_page_flip(struct drm_device *dev, int pipe)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_unpin_work *work;
+	struct drm_i915_gem_object *obj_priv;
+	struct drm_pending_vblank_event *e;
+	struct timeval now;
+	unsigned long flags;
+
+	/* Ignore early vblank irqs */
+	if (intel_crtc == NULL)
+		return;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	work = intel_crtc->unpin_work;
+	if (work == NULL || !work->pending) {
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		return;
+	}
+
+	intel_crtc->unpin_work = NULL;
+	drm_vblank_put(dev, intel_crtc->pipe);
+
+	if (work->event) {
+		e = work->event;
+		do_gettimeofday(&now);
+		e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe);
+		e->event.tv_sec = now.tv_sec;
+		e->event.tv_usec = now.tv_usec;
+		list_add_tail(&e->base.link,
+			      &e->base.file_priv->event_list);
+		wake_up_interruptible(&e->base.file_priv->event_wait);
+	}
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	obj_priv = work->obj->driver_private;
+	if (atomic_dec_and_test(&obj_priv->pending_flip))
+		DRM_WAKEUP(&dev_priv->pending_flip_queue);
+	schedule_work(&work->work);
+}
+
+void intel_prepare_page_flip(struct drm_device *dev, int plane)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	if (intel_crtc->unpin_work)
+		intel_crtc->unpin_work->pending = 1;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static int intel_crtc_page_flip(struct drm_crtc *crtc,
+				struct drm_framebuffer *fb,
+				struct drm_pending_vblank_event *event)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_framebuffer *intel_fb;
+	struct drm_i915_gem_object *obj_priv;
+	struct drm_gem_object *obj;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_unpin_work *work;
+	unsigned long flags;
+	int ret;
+	RING_LOCALS;
+
+	work = kzalloc(sizeof *work, GFP_KERNEL);
+	if (work == NULL)
+		return -ENOMEM;
+
+	mutex_lock(&dev->struct_mutex);
+
+	work->event = event;
+	work->dev = crtc->dev;
+	intel_fb = to_intel_framebuffer(crtc->fb);
+	work->obj = intel_fb->obj;
+	INIT_WORK(&work->work, intel_unpin_work_fn);
+
+	/* We borrow the event spin lock for protecting unpin_work */
+	spin_lock_irqsave(&dev->event_lock, flags);
+	if (intel_crtc->unpin_work) {
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		kfree(work);
+		mutex_unlock(&dev->struct_mutex);
+		return -EBUSY;
+	}
+	intel_crtc->unpin_work = work;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	intel_fb = to_intel_framebuffer(fb);
+	obj = intel_fb->obj;
+
+	ret = intel_pin_and_fence_fb_obj(dev, obj);
+	if (ret != 0) {
+		/* drop the stale unpin_work pointer before freeing it */
+		spin_lock_irqsave(&dev->event_lock, flags);
+		intel_crtc->unpin_work = NULL;
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		kfree(work);
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	/* Reference the old fb object for the scheduled work. */
+	drm_gem_object_reference(work->obj);
+
+	crtc->fb = fb;
+	i915_gem_object_flush_write_domain(obj);
+	drm_vblank_get(dev, intel_crtc->pipe);
+	obj_priv = obj->driver_private;
+	atomic_inc(&obj_priv->pending_flip);
+
+	BEGIN_LP_RING(4);
+	OUT_RING(MI_DISPLAY_FLIP |
+		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	OUT_RING(fb->pitch);
+	if (IS_I965G(dev)) {
+		OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
+		OUT_RING((fb->width << 16) | fb->height);
+	} else {
+		OUT_RING(obj_priv->gtt_offset);
+		OUT_RING(MI_NOOP);
+	}
+	ADVANCE_LP_RING();
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
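
For context, the .page_flip hook wired up below is driven from userspace
through the page-flip ioctl. A minimal usage sketch, assuming libdrm's
drmModePageFlip()/drmHandleEvent() interface (fd, crtc_id and fb_id are
placeholders):

	#include <xf86drm.h>
	#include <xf86drmMode.h>

	static void flip_done(int fd, unsigned int sequence,
			      unsigned int tv_sec, unsigned int tv_usec,
			      void *user_data)
	{
		/* the event queued by intel_finish_page_flip() lands here */
	}

	static int queue_flip(int fd, uint32_t crtc_id, uint32_t fb_id)
	{
		drmEventContext evctx = {
			.version = DRM_EVENT_CONTEXT_VERSION,
			.page_flip_handler = flip_done,
		};
		int ret;

		ret = drmModePageFlip(fd, crtc_id, fb_id,
				      DRM_MODE_PAGE_FLIP_EVENT, NULL);
		if (ret)
			return ret;
		return drmHandleEvent(fd, &evctx); /* waits for the event */
	}
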
 static const struct drm_crtc_helper_funcs intel_helper_funcs = {
 	.dpms = intel_crtc_dpms,
 	.mode_fixup = intel_crtc_mode_fixup,
@@ -3983,11 +4254,13 @@
 	.gamma_set = intel_crtc_gamma_set,
 	.set_config = drm_crtc_helper_set_config,
 	.destroy = intel_crtc_destroy,
+	.page_flip = intel_crtc_page_flip,
 };
 
 
 static void intel_crtc_init(struct drm_device *dev, int pipe)
 {
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc;
 	int i;
 
@@ -4010,10 +4283,15 @@
 	intel_crtc->pipe = pipe;
 	intel_crtc->plane = pipe;
 	if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) {
-		DRM_DEBUG("swapping pipes & planes for FBC\n");
+		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
 		intel_crtc->plane = ((pipe == 0) ? 1 : 0);
 	}
 
+	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
+	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
+	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
+
 	intel_crtc->cursor_addr = 0;
 	intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
 	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
@@ -4090,7 +4368,7 @@
 	if (IS_MOBILE(dev) && !IS_I830(dev))
 		intel_lvds_init(dev);
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		int found;
 
 		if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
@@ -4118,7 +4396,7 @@
 		if (I915_READ(PCH_DP_D) & DP_DETECTED)
 			intel_dp_init(dev, PCH_DP_D);
 
-	} else if (IS_I9XX(dev)) {
+	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
 		bool found = false;
 
 		if (I915_READ(SDVOB) & SDVO_DETECTED) {
@@ -4145,10 +4423,10 @@
 
 		if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
 			intel_dp_init(dev, DP_D);
-	} else
+	} else if (IS_I8XX(dev))
 		intel_dvo_init(dev);
 
-	if (IS_I9XX(dev) && IS_MOBILE(dev) && !IS_IGDNG(dev))
+	if (SUPPORTS_TV(dev))
 		intel_tv_init(dev);
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -4257,7 +4535,7 @@
 	 * Disable clock gating reported to work incorrectly according to the
 	 * specs, but enable as much else as we can.
 	 */
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		return;
 	} else if (IS_G4X(dev)) {
 		uint32_t dspclk_gate;
@@ -4291,11 +4569,52 @@
 		dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
 			DSTATE_DOT_CLOCK_GATING;
 		I915_WRITE(D_STATE, dstate);
-	} else if (IS_I855(dev) || IS_I865G(dev)) {
+	} else if (IS_I85X(dev) || IS_I865G(dev)) {
 		I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
 	} else if (IS_I830(dev)) {
 		I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
 	}
+
+	/*
+	 * The GPU can automatically power down the render unit if given a
+	 * page in which to save its state.
+	 */
+	if (I915_HAS_RC6(dev)) {
+		struct drm_gem_object *pwrctx;
+		struct drm_i915_gem_object *obj_priv;
+		int ret;
+
+		if (dev_priv->pwrctx) {
+			obj_priv = dev_priv->pwrctx->driver_private;
+		} else {
+			pwrctx = drm_gem_object_alloc(dev, 4096);
+			if (!pwrctx) {
+				DRM_DEBUG("failed to alloc power context, "
+					  "RC6 disabled\n");
+				goto out;
+			}
+
+			ret = i915_gem_object_pin(pwrctx, 4096);
+			if (ret) {
+				DRM_ERROR("failed to pin power context: %d\n",
+					  ret);
+				drm_gem_object_unreference(pwrctx);
+				goto out;
+			}
+
+			i915_gem_object_set_to_gtt_domain(pwrctx, 1);
+
+			dev_priv->pwrctx = pwrctx;
+			obj_priv = pwrctx->driver_private;
+		}
+
+		I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
+		I915_WRITE(MCHBAR_RENDER_STANDBY,
+			   I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
+	}
+
+out:
+	return;
 }
 
 /* Set up chip specific display functions */
@@ -4304,8 +4623,8 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	/* We always want a DPMS function */
-	if (IS_IGDNG(dev))
-		dev_priv->display.dpms = igdng_crtc_dpms;
+	if (IS_IRONLAKE(dev))
+		dev_priv->display.dpms = ironlake_crtc_dpms;
 	else
 		dev_priv->display.dpms = i9xx_crtc_dpms;
 
@@ -4324,13 +4643,13 @@
 	}
 
 	/* Returns the core display clock speed */
-	if (IS_I945G(dev))
+	if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
 		dev_priv->display.get_display_clock_speed =
 			i945_get_display_clock_speed;
 	else if (IS_I915G(dev))
 		dev_priv->display.get_display_clock_speed =
 			i915_get_display_clock_speed;
-	else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
+	else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
 		dev_priv->display.get_display_clock_speed =
 			i9xx_misc_get_display_clock_speed;
 	else if (IS_I915GM(dev))
@@ -4339,7 +4658,7 @@
 	else if (IS_I865G(dev))
 		dev_priv->display.get_display_clock_speed =
 			i865_get_display_clock_speed;
-	else if (IS_I855(dev))
+	else if (IS_I85X(dev))
 		dev_priv->display.get_display_clock_speed =
 			i855_get_display_clock_speed;
 	else /* 852, 830 */
@@ -4347,7 +4666,7 @@
 			i830_get_display_clock_speed;
 
 	/* For FIFO watermark updates */
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		dev_priv->display.update_wm = NULL;
 	else if (IS_G4X(dev))
 		dev_priv->display.update_wm = g4x_update_wm;
@@ -4403,7 +4722,7 @@
 		num_pipe = 2;
 	else
 		num_pipe = 1;
-	DRM_DEBUG("%d display pipe%s available.\n",
+	DRM_DEBUG_KMS("%d display pipe%s available.\n",
 		  num_pipe, num_pipe > 1 ? "s" : "");
 
 	if (IS_I85X(dev))
@@ -4422,6 +4741,15 @@
 	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
 	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
 		    (unsigned long)dev);
+
+	intel_setup_overlay(dev);
+
+	if (IS_PINEVIEW(dev) && !intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+							dev_priv->fsb_freq,
+							dev_priv->mem_freq))
+		DRM_INFO("failed to find known CxSR latency "
+			 "(found fsb freq %d, mem freq %d), disabling CxSR\n",
+			 dev_priv->fsb_freq, dev_priv->mem_freq);
 }
 
 void intel_modeset_cleanup(struct drm_device *dev)
@@ -4445,11 +4773,21 @@
 	intel_increase_renderclock(dev, false);
 	del_timer_sync(&dev_priv->idle_timer);
 
-	mutex_unlock(&dev->struct_mutex);
-
 	if (dev_priv->display.disable_fbc)
 		dev_priv->display.disable_fbc(dev);
 
+	if (dev_priv->pwrctx) {
+		struct drm_i915_gem_object *obj_priv;
+
+		obj_priv = dev_priv->pwrctx->driver_private;
+		I915_WRITE(PWRCTXA, obj_priv->gtt_offset & ~PWRCTX_EN);
+		I915_READ(PWRCTXA);
+		i915_gem_object_unpin(dev_priv->pwrctx);
+		drm_gem_object_unreference(dev_priv->pwrctx);
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
 	drm_mode_config_cleanup(dev);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d834475..4e7aa8b 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -33,7 +33,8 @@
 #include "intel_drv.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
-#include "intel_dp.h"
+#include "drm_dp_helper.h"
+
 
 #define DP_LINK_STATUS_SIZE	6
 #define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
@@ -223,8 +224,8 @@
 	 */
 	if (IS_eDP(intel_output))
 		aux_clock_divider = 225; /* eDP input clock at 450Mhz */
-	else if (IS_IGDNG(dev))
-		aux_clock_divider = 62; /* IGDNG: input clock fixed at 125Mhz */
+	else if (IS_IRONLAKE(dev))
+		aux_clock_divider = 62; /* Ironlake: input clock fixed at 125MHz */
 	else
 		aux_clock_divider = intel_hrawclk(dev) / 2;
 
@@ -282,7 +283,7 @@
 	/* Timeouts occur when the device isn't connected, so they're
 	 * "normal" -- don't fill the kernel log with these */
 	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
-		DRM_DEBUG("dp_aux_ch timeout status 0x%08x\n", status);
+		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
 		return -ETIMEDOUT;
 	}
 
@@ -382,17 +383,77 @@
 }
 
 static int
-intel_dp_i2c_aux_ch(struct i2c_adapter *adapter,
-		    uint8_t *send, int send_bytes,
-		    uint8_t *recv, int recv_bytes)
+intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+		    uint8_t write_byte, uint8_t *read_byte)
 {
+	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
 	struct intel_dp_priv *dp_priv = container_of(adapter,
 						     struct intel_dp_priv,
 						     adapter);
 	struct intel_output *intel_output = dp_priv->intel_output;
+	uint16_t address = algo_data->address;
+	uint8_t msg[5];
+	uint8_t reply[2];
+	int msg_bytes;
+	int reply_bytes;
+	int ret;
 
-	return intel_dp_aux_ch(intel_output,
-			       send, send_bytes, recv, recv_bytes);
+	/* Set up the command byte */
+	if (mode & MODE_I2C_READ)
+		msg[0] = AUX_I2C_READ << 4;
+	else
+		msg[0] = AUX_I2C_WRITE << 4;
+
+	if (!(mode & MODE_I2C_STOP))
+		msg[0] |= AUX_I2C_MOT << 4;
+
+	msg[1] = address >> 8;
+	msg[2] = address;
+
+	switch (mode) {
+	case MODE_I2C_WRITE:
+		msg[3] = 0;
+		msg[4] = write_byte;
+		msg_bytes = 5;
+		reply_bytes = 1;
+		break;
+	case MODE_I2C_READ:
+		msg[3] = 0;
+		msg_bytes = 4;
+		reply_bytes = 2;
+		break;
+	default:
+		msg_bytes = 3;
+		reply_bytes = 1;
+		break;
+	}
+
+	for (;;) {
+		ret = intel_dp_aux_ch(intel_output,
+				      msg, msg_bytes,
+				      reply, reply_bytes);
+		if (ret < 0) {
+			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
+			return ret;
+		}
+		switch (reply[0] & AUX_I2C_REPLY_MASK) {
+		case AUX_I2C_REPLY_ACK:
+			if (mode == MODE_I2C_READ) {
+				*read_byte = reply[1];
+			}
+			return reply_bytes - 1;
+		case AUX_I2C_REPLY_NACK:
+			DRM_DEBUG_KMS("aux_ch nack\n");
+			return -EREMOTEIO;
+		case AUX_I2C_REPLY_DEFER:
+			DRM_DEBUG_KMS("aux_ch defer\n");
+			udelay(100);
+			break;
+		default:
+			DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
+			return -EREMOTEIO;
+		}
+	}
 }
 
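
As a worked example of the framing above, a single-byte I2C read from a
DDC device at address 0x50 would be encoded roughly as follows (a sketch;
the AUX_I2C_* values come from the DP helper header assumed here):

	uint8_t msg[4];

	/* command nibble in the high bits: read, with MOT set while the
	 * I2C transaction stays open */
	msg[0] = (AUX_I2C_READ << 4) | (AUX_I2C_MOT << 4);
	msg[1] = 0x50 >> 8;	/* address high byte: 0x00 */
	msg[2] = 0x50;		/* address low byte */
	msg[3] = 0;		/* zero-based length: one data byte */
	/* msg_bytes = 4; expect reply_bytes = 2 (status + data byte) */
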
 static int
@@ -435,7 +496,8 @@
 				dp_priv->link_bw = bws[clock];
 				dp_priv->lane_count = lane_count;
 				adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
-				DRM_DEBUG("Display port link bw %02x lane count %d clock %d\n",
+				DRM_DEBUG_KMS("Display port link bw %02x lane "
+						"count %d clock %d\n",
 				       dp_priv->link_bw, dp_priv->lane_count,
 				       adjusted_mode->clock);
 				return true;
@@ -514,7 +576,7 @@
 	intel_dp_compute_m_n(3, lane_count,
 			     mode->clock, adjusted_mode->clock, &m_n);
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		if (intel_crtc->pipe == 0) {
 			I915_WRITE(TRANSA_DATA_M1,
 				   ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
@@ -606,23 +668,23 @@
 	}
 }
 
-static void igdng_edp_backlight_on (struct drm_device *dev)
+static void ironlake_edp_backlight_on (struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp;
 
-	DRM_DEBUG("\n");
+	DRM_DEBUG_KMS("\n");
 	pp = I915_READ(PCH_PP_CONTROL);
 	pp |= EDP_BLC_ENABLE;
 	I915_WRITE(PCH_PP_CONTROL, pp);
 }
 
-static void igdng_edp_backlight_off (struct drm_device *dev)
+static void ironlake_edp_backlight_off (struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp;
 
-	DRM_DEBUG("\n");
+	DRM_DEBUG_KMS("\n");
 	pp = I915_READ(PCH_PP_CONTROL);
 	pp &= ~EDP_BLC_ENABLE;
 	I915_WRITE(PCH_PP_CONTROL, pp);
@@ -641,13 +703,13 @@
 		if (dp_reg & DP_PORT_EN) {
 			intel_dp_link_down(intel_output, dp_priv->DP);
 			if (IS_eDP(intel_output))
-				igdng_edp_backlight_off(dev);
+				ironlake_edp_backlight_off(dev);
 		}
 	} else {
 		if (!(dp_reg & DP_PORT_EN)) {
 			intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
 			if (IS_eDP(intel_output))
-				igdng_edp_backlight_on(dev);
+				ironlake_edp_backlight_on(dev);
 		}
 	}
 	dp_priv->dpms_mode = mode;
@@ -1010,7 +1072,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
 
-	DRM_DEBUG("\n");
+	DRM_DEBUG_KMS("\n");
 
 	if (IS_eDP(intel_output)) {
 		DP &= ~DP_PLL_ENABLE;
@@ -1071,7 +1133,7 @@
 }
 
 static enum drm_connector_status
-igdng_dp_detect(struct drm_connector *connector)
+ironlake_dp_detect(struct drm_connector *connector)
 {
 	struct intel_output *intel_output = to_intel_output(connector);
 	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
@@ -1106,8 +1168,8 @@
 
 	dp_priv->has_audio = false;
 
-	if (IS_IGDNG(dev))
-		return igdng_dp_detect(connector);
+	if (IS_IRONLAKE(dev))
+		return ironlake_dp_detect(connector);
 
 	temp = I915_READ(PORT_HOTPLUG_EN);
 
@@ -1227,7 +1289,53 @@
 	if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
 		intel_dp_check_link_status(intel_output);
 }
+
+/*
+ * Enumerate the child device array parsed from the VBT to check
+ * whether the given DP port is present.
+ * Return 1 if it is present, 0 if it is not.
+ * If no child devices were parsed from the VBT, the given DP port
+ * is assumed to be present.
+ */
+static int dp_is_present_in_vbt(struct drm_device *dev, int dp_reg)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct child_device_config *p_child;
+	int i, dp_port, ret;
 
+	if (!dev_priv->child_dev_num)
+		return 1;
+
+	dp_port = 0;
+	if (dp_reg == DP_B || dp_reg == PCH_DP_B)
+		dp_port = PORT_IDPB;
+	else if (dp_reg == DP_C || dp_reg == PCH_DP_C)
+		dp_port = PORT_IDPC;
+	else if (dp_reg == DP_D || dp_reg == PCH_DP_D)
+		dp_port = PORT_IDPD;
+
+	ret = 0;
+	for (i = 0; i < dev_priv->child_dev_num; i++) {
+		p_child = dev_priv->child_dev + i;
+		/*
+		 * If the device type is not DP, continue.
+		 */
+		if (p_child->device_type != DEVICE_TYPE_DP &&
+			p_child->device_type != DEVICE_TYPE_eDP)
+			continue;
+		/* Find the eDP port */
+		if (dp_reg == DP_A && p_child->device_type == DEVICE_TYPE_eDP) {
+			ret = 1;
+			break;
+		}
+		/* Find the DP port */
+		if (p_child->dvo_port == dp_port) {
+			ret = 1;
+			break;
+		}
+	}
+	return ret;
+}
+
 void
 intel_dp_init(struct drm_device *dev, int output_reg)
 {
@@ -1237,6 +1345,10 @@
 	struct intel_dp_priv *dp_priv;
 	const char *name = NULL;
 
+	if (!dp_is_present_in_vbt(dev, output_reg)) {
+		DRM_DEBUG_KMS("DP is not present. Ignore it\n");
+		return;
+	}
 	intel_output = kcalloc(sizeof(struct intel_output) + 
 			       sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
 	if (!intel_output)
@@ -1254,11 +1366,11 @@
 	else
 		intel_output->type = INTEL_OUTPUT_DISPLAYPORT;
 
-	if (output_reg == DP_B)
+	if (output_reg == DP_B || output_reg == PCH_DP_B)
 		intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
-	else if (output_reg == DP_C)
+	else if (output_reg == DP_C || output_reg == PCH_DP_C)
 		intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
-	else if (output_reg == DP_D)
+	else if (output_reg == DP_D || output_reg == PCH_DP_D)
 		intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
 
 	if (IS_eDP(intel_output)) {
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ef61fe9..a51573d 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -110,6 +110,32 @@
 	int clone_mask;
 };
 
+struct intel_crtc;
+struct intel_overlay {
+	struct drm_device *dev;
+	struct intel_crtc *crtc;
+	struct drm_i915_gem_object *vid_bo;
+	struct drm_i915_gem_object *old_vid_bo;
+	int active;
+	int pfit_active;
+	u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
+	u32 color_key;
+	u32 brightness, contrast, saturation;
+	u32 old_xscale, old_yscale;
+	/* register access */
+	u32 flip_addr;
+	struct drm_i915_gem_object *reg_bo;
+	void *virt_addr;
+	/* flip handling */
+	uint32_t last_flip_req;
+	int hw_wedged;
+#define HW_WEDGED		1
+#define NEEDS_WAIT_FOR_FLIP	2
+#define RELEASE_OLD_VID		3
+#define SWITCH_OFF_STAGE_1	4
+#define SWITCH_OFF_STAGE_2	5
+};
+
 struct intel_crtc {
 	struct drm_crtc base;
 	enum pipe pipe;
@@ -121,6 +147,8 @@
 	bool busy; /* is scanout buffer being updated frequently? */
 	struct timer_list idle_timer;
 	bool lowfreq_avail;
+	struct intel_overlay *overlay;
+	struct intel_unpin_work *unpin_work;
 };
 
 #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
@@ -134,6 +162,8 @@
 int intel_ddc_get_modes(struct intel_output *intel_output);
 extern bool intel_ddc_probe(struct intel_output *intel_output);
 void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
+void intel_i2c_reset_gmbus(struct drm_device *dev);
+
 extern void intel_crt_init(struct drm_device *dev);
 extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
 extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
@@ -148,6 +178,7 @@
 extern void intel_edp_link_config (struct intel_output *, int *, int *);
 
 
+extern int intel_panel_fitter_pipe (struct drm_device *dev);
 extern void intel_crtc_load_lut(struct drm_crtc *crtc);
 extern void intel_encoder_prepare (struct drm_encoder *encoder);
 extern void intel_encoder_commit (struct drm_encoder *encoder);
@@ -177,10 +208,23 @@
 				    u16 blue, int regno);
 extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 				    u16 *blue, int regno);
+extern void intel_init_clock_gating(struct drm_device *dev);
 
 extern int intel_framebuffer_create(struct drm_device *dev,
 				    struct drm_mode_fb_cmd *mode_cmd,
 				    struct drm_framebuffer **fb,
 				    struct drm_gem_object *obj);
 
+extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
+extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
+
+extern void intel_setup_overlay(struct drm_device *dev);
+extern void intel_cleanup_overlay(struct drm_device *dev);
+extern int intel_overlay_switch_off(struct intel_overlay *overlay);
+extern int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
+						int interruptible);
+extern int intel_overlay_put_image(struct drm_device *dev, void *data,
+				   struct drm_file *file_priv);
+extern int intel_overlay_attrs(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv);
 #endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 40fcf6f..371d753 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -230,8 +230,9 @@
 	par->intel_fb = intel_fb;
 
 	/* To allow resizing without swapping buffers */
-	DRM_DEBUG("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width,
-		  intel_fb->base.height, obj_priv->gtt_offset, fbo);
+	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
+			intel_fb->base.width, intel_fb->base.height,
+			obj_priv->gtt_offset, fbo);
 
 	mutex_unlock(&dev->struct_mutex);
 	return 0;
@@ -249,7 +250,7 @@
 {
 	int ret;
 
-	DRM_DEBUG("\n");
+	DRM_DEBUG_KMS("\n");
 	ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create);
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index c33451a..f04dbbe 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -82,7 +82,7 @@
 	/* HW workaround, need to toggle enable bit off and on for 12bpc, but
 	 * we do this anyway, which proved more stable in testing.
 	 */
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
 		POSTING_READ(hdmi_priv->sdvox_reg);
 	}
@@ -99,7 +99,7 @@
 	/* HW workaround, need to write this twice for issue that may result
 	 * in first write getting masked.
 	 */
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		I915_WRITE(hdmi_priv->sdvox_reg, temp);
 		POSTING_READ(hdmi_priv->sdvox_reg);
 	}
@@ -225,7 +225,52 @@
 	.destroy = intel_hdmi_enc_destroy,
 };
 
+/*
+ * Enumerate the child device array parsed from the VBT to check
+ * whether the given HDMI port is present.
+ * Return 1 if it is present, 0 if it is not.
+ * If no child devices were parsed from the VBT, the given HDMI port
+ * is assumed to be present.
+ */
+static int hdmi_is_present_in_vbt(struct drm_device *dev, int hdmi_reg)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct child_device_config *p_child;
+	int i, hdmi_port, ret;
 
+	if (!dev_priv->child_dev_num)
+		return 1;
+
+	if (hdmi_reg == SDVOB)
+		hdmi_port = DVO_B;
+	else if (hdmi_reg == SDVOC)
+		hdmi_port = DVO_C;
+	else if (hdmi_reg == HDMIB)
+		hdmi_port = DVO_B;
+	else if (hdmi_reg == HDMIC)
+		hdmi_port = DVO_C;
+	else if (hdmi_reg == HDMID)
+		hdmi_port = DVO_D;
+	else
+		return 0;
+
+	ret = 0;
+	for (i = 0; i < dev_priv->child_dev_num; i++) {
+		p_child = dev_priv->child_dev + i;
+		/*
+		 * If the device type is not HDMI, continue.
+		 */
+		if (p_child->device_type != DEVICE_TYPE_HDMI)
+			continue;
+		/* Find the HDMI port */
+		if (p_child->dvo_port == hdmi_port) {
+			ret = 1;
+			break;
+		}
+	}
+	return ret;
+}
+
 void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -233,6 +278,10 @@
 	struct intel_output *intel_output;
 	struct intel_hdmi_priv *hdmi_priv;
 
+	if (!hdmi_is_present_in_vbt(dev, sdvox_reg)) {
+		DRM_DEBUG_KMS("HDMI is not present. Ignored it \n");
+		return;
+	}
 	intel_output = kcalloc(sizeof(struct intel_output) +
 			       sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
 	if (!intel_output)
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index c7eab72..8673c73 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -39,7 +39,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	/* When using bit bashing for I2C, this bit needs to be set to 1 */
-	if (!IS_IGD(dev))
+	if (!IS_PINEVIEW(dev))
 		return;
 	if (enable)
 		I915_WRITE(DSPCLK_GATE_D,
@@ -118,6 +118,23 @@
 	udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
 }
 
+/* Clears the GMBUS setup.  Our driver doesn't make use of the GMBUS I2C
+ * engine, but if the BIOS leaves it enabled, then that can break our use
+ * of the bit-banging I2C interfaces.  This is notably the case with the
+ * Mac Mini in EFI mode.
+ */
+void
+intel_i2c_reset_gmbus(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_IRONLAKE(dev)) {
+		I915_WRITE(PCH_GMBUS0, 0);
+	} else {
+		I915_WRITE(GMBUS0, 0);
+	}
+}
+
 /**
  * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
  * @dev: DRM device
@@ -168,6 +185,8 @@
 	if(i2c_bit_add_bus(&chan->adapter))
 		goto out_free;
 
+	intel_i2c_reset_gmbus(dev);
+
 	/* JJJ:  raise SCL and SDA? */
 	intel_i2c_quirk_set(dev, true);
 	set_data(chan, 1);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index eb36502..3118ce2 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -56,7 +56,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 blc_pwm_ctl, reg;
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		reg = BLC_PWM_CPU_CTL;
 	else
 		reg = BLC_PWM_CTL;
@@ -74,7 +74,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 reg;
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		reg = BLC_PWM_PCH_CTL2;
 	else
 		reg = BLC_PWM_CTL;
@@ -91,7 +91,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp_status, ctl_reg, status_reg;
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		ctl_reg = PCH_PP_CONTROL;
 		status_reg = PCH_PP_STATUS;
 	} else {
@@ -137,7 +137,7 @@
 	u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
 	u32 pwm_ctl_reg;
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		pp_on_reg = PCH_PP_ON_DELAYS;
 		pp_off_reg = PCH_PP_OFF_DELAYS;
 		pp_ctl_reg = PCH_PP_CONTROL;
@@ -174,7 +174,7 @@
 	u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
 	u32 pwm_ctl_reg;
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		pp_on_reg = PCH_PP_ON_DELAYS;
 		pp_off_reg = PCH_PP_OFF_DELAYS;
 		pp_ctl_reg = PCH_PP_CONTROL;
@@ -297,7 +297,7 @@
 	}
 
 	/* full screen scale for now */
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		goto out;
 
 	/* 965+ wants fuzzy fitting */
@@ -327,7 +327,7 @@
 	 * to register description and PRM.
 	 * Change the value here to see the borders for debugging
 	 */
-	if (!IS_IGDNG(dev)) {
+	if (!IS_IRONLAKE(dev)) {
 		I915_WRITE(BCLRPAT_A, 0);
 		I915_WRITE(BCLRPAT_B, 0);
 	}
@@ -548,7 +548,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 reg;
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		reg = BLC_PWM_CPU_CTL;
 	else
 		reg = BLC_PWM_CTL;
@@ -587,7 +587,7 @@
 	 * settings.
 	 */
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		return;
 
 	/*
@@ -914,6 +914,101 @@
 #endif
 
 /**
+ * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
+ * @dev: drm device
+ * @connector: LVDS connector
+ *
+ * Find the reduced downclock for LVDS in EDID.
+ */
+static void intel_find_lvds_downclock(struct drm_device *dev,
+				struct drm_connector *connector)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_display_mode *scan, *panel_fixed_mode;
+	int temp_downclock;
+
+	panel_fixed_mode = dev_priv->panel_fixed_mode;
+	temp_downclock = panel_fixed_mode->clock;
+
+	mutex_lock(&dev->mode_config.mutex);
+	list_for_each_entry(scan, &connector->probed_modes, head) {
+		/*
+		 * If a mode has the same timings as the fixed panel mode but
+		 * a different refresh rate, we have found a reduced downclock
+		 * for the LVDS. In that case we can program different FPx0/1
+		 * values to dynamically select between the low and the high
+		 * frequency.
+		 */
+		if (scan->hdisplay == panel_fixed_mode->hdisplay &&
+			scan->hsync_start == panel_fixed_mode->hsync_start &&
+			scan->hsync_end == panel_fixed_mode->hsync_end &&
+			scan->htotal == panel_fixed_mode->htotal &&
+			scan->vdisplay == panel_fixed_mode->vdisplay &&
+			scan->vsync_start == panel_fixed_mode->vsync_start &&
+			scan->vsync_end == panel_fixed_mode->vsync_end &&
+			scan->vtotal == panel_fixed_mode->vtotal) {
+			if (scan->clock < temp_downclock) {
+				/*
+				 * A downclock was already found, but keep
+				 * looking for the lowest one.
+				 */
+				temp_downclock = scan->clock;
+			}
+		}
+	}
+	mutex_unlock(&dev->mode_config.mutex);
+	if (temp_downclock < panel_fixed_mode->clock) {
+		/* We found the downclock for LVDS. */
+		dev_priv->lvds_downclock_avail = 1;
+		dev_priv->lvds_downclock = temp_downclock;
+		DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
+				"Normal clock %dKhz, downclock %dKhz\n",
+				panel_fixed_mode->clock, temp_downclock);
+	}
+	return;
+}
+
+/*
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the LVDS is present.
+ * If it is present, return 1; otherwise return 0.
+ * If no child dev is parsed from VBT, assume the LVDS is present.
+ * Note: the addin_offset must also be checked for an LVDS panel;
+ * only when it is non-zero is the panel regarded as present.
+ */
+static int lvds_is_present_in_vbt(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct child_device_config *p_child;
+	int i, ret;
+
+	if (!dev_priv->child_dev_num)
+		return 1;
+
+	ret = 0;
+	for (i = 0; i < dev_priv->child_dev_num; i++) {
+		p_child = dev_priv->child_dev + i;
+		/*
+		 * If the device type is not LFP, continue.
+		 * If the device type is 0x22, it is also regarded as LFP.
+		 */
+		if (p_child->device_type != DEVICE_TYPE_INT_LFP &&
+			p_child->device_type != DEVICE_TYPE_LFP)
+			continue;
+
+		/* The addin_offset should be checked. Only when it is
+		 * non-zero, it is regarded as present.
+		 */
+		if (p_child->addin_offset) {
+			ret = 1;
+			break;
+		}
+	}
+	return ret;
+}
+
+/**
  * intel_lvds_init - setup LVDS connectors on this device
  * @dev: drm device
  *
@@ -936,21 +1031,20 @@
 	if (dmi_check_system(intel_no_lvds))
 		return;
 
-	/* Assume that any device without an ACPI LID device also doesn't
-	 * have an integrated LVDS.  We would be better off parsing the BIOS
-	 * to get a reliable indicator, but that code isn't written yet.
-	 *
-	 * In the case of all-in-one desktops using LVDS that we've seen,
-	 * they're using SDVO LVDS.
+	/*
+	 * Assume LVDS is present if there's an ACPI lid device or if the
+	 * device is present in the VBT.
 	 */
-	if (!intel_lid_present())
+	if (!lvds_is_present_in_vbt(dev) && !intel_lid_present()) {
+		DRM_DEBUG_KMS("LVDS is not present in VBT and no lid detected\n");
 		return;
+	}
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
 			return;
 		if (dev_priv->edp_support) {
-			DRM_DEBUG("disable LVDS for eDP support\n");
+			DRM_DEBUG_KMS("disable LVDS for eDP support\n");
 			return;
 		}
 		gpio = PCH_GPIOC;
@@ -1023,6 +1117,7 @@
 			dev_priv->panel_fixed_mode =
 				drm_mode_duplicate(dev, scan);
 			mutex_unlock(&dev->mode_config.mutex);
+			intel_find_lvds_downclock(dev, connector);
 			goto out;
 		}
 		mutex_unlock(&dev->mode_config.mutex);
@@ -1047,8 +1142,8 @@
 	 * correct mode.
 	 */
 
-	/* IGDNG: FIXME if still fail, not try pipe mode now */
-	if (IS_IGDNG(dev))
+	/* Ironlake: FIXME: if this still fails, don't fall back to the current pipe mode for now */
+	if (IS_IRONLAKE(dev))
 		goto failed;
 
 	lvds = I915_READ(LVDS);
@@ -1069,7 +1164,7 @@
 		goto failed;
 
 out:
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		u32 pwm;
 		/* make sure PWM is enabled */
 		pwm = I915_READ(BLC_PWM_CPU_CTL2);
@@ -1082,7 +1177,7 @@
 	}
 	dev_priv->lid_notifier.notifier_call = intel_lid_notify;
 	if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
-		DRM_DEBUG("lid notifier registration failed\n");
+		DRM_DEBUG_KMS("lid notifier registration failed\n");
 		dev_priv->lid_notifier.notifier_call = NULL;
 	}
 	drm_sysfs_connector_add(connector);
@@ -1093,5 +1188,6 @@
 	if (intel_output->ddc_bus)
 		intel_i2c_destroy(intel_output->ddc_bus);
 	drm_connector_cleanup(connector);
+	drm_encoder_cleanup(encoder);
 	kfree(intel_output);
 }
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
new file mode 100644
index 0000000..2639591
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -0,0 +1,1416 @@
+/*
+ * Copyright © 2009
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Daniel Vetter <daniel@ffwll.ch>
+ *
+ * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_drv.h"
+
+/* Limits for overlay size. According to the Intel docs the real limits are:
+ * Y width: 4095, Y height: 2047, UV width (planar): 2047,
+ * UV height (planar): 1023. But the Xorg driver assumes 2048 for both width
+ * and height, so use the minimum of both. */
+#define IMAGE_MAX_WIDTH		2048
+#define IMAGE_MAX_HEIGHT	2046 /* 2 * 1023 */
+/* on 830 and 845 these large limits result in the card hanging */
+#define IMAGE_MAX_WIDTH_LEGACY	1024
+#define IMAGE_MAX_HEIGHT_LEGACY	1088
+
+/* overlay register definitions */
+/* OCMD register */
+#define OCMD_TILED_SURFACE	(0x1<<19)
+#define OCMD_MIRROR_MASK	(0x3<<17)
+#define OCMD_MIRROR_MODE	(0x3<<17)
+#define OCMD_MIRROR_HORIZONTAL	(0x1<<17)
+#define OCMD_MIRROR_VERTICAL	(0x2<<17)
+#define OCMD_MIRROR_BOTH	(0x3<<17)
+#define OCMD_BYTEORDER_MASK	(0x3<<14) /* zero for YUYV or FOURCC YUY2 */
+#define OCMD_UV_SWAP		(0x1<<14) /* YVYU */
+#define OCMD_Y_SWAP		(0x2<<14) /* UYVY or FOURCC UYVY */
+#define OCMD_Y_AND_UV_SWAP	(0x3<<14) /* VYUY */
+#define OCMD_SOURCE_FORMAT_MASK (0xf<<10)
+#define OCMD_RGB_888		(0x1<<10) /* not in i965 Intel docs */
+#define OCMD_RGB_555		(0x2<<10) /* not in i965 Intel docs */
+#define OCMD_RGB_565		(0x3<<10) /* not in i965 Intel docs */
+#define OCMD_YUV_422_PACKED	(0x8<<10)
+#define OCMD_YUV_411_PACKED	(0x9<<10) /* not in i965 Intel docs */
+#define OCMD_YUV_420_PLANAR	(0xc<<10)
+#define OCMD_YUV_422_PLANAR	(0xd<<10)
+#define OCMD_YUV_410_PLANAR	(0xe<<10) /* also 411 */
+#define OCMD_TVSYNCFLIP_PARITY	(0x1<<9)
+#define OCMD_TVSYNCFLIP_ENABLE	(0x1<<7)
+#define OCMD_BUF_TYPE_MASK	(0x1<<5)
+#define OCMD_BUF_TYPE_FRAME	(0x0<<5)
+#define OCMD_BUF_TYPE_FIELD	(0x1<<5)
+#define OCMD_TEST_MODE		(0x1<<4)
+#define OCMD_BUFFER_SELECT	(0x3<<2)
+#define OCMD_BUFFER0		(0x0<<2)
+#define OCMD_BUFFER1		(0x1<<2)
+#define OCMD_FIELD_SELECT	(0x1<<2)
+#define OCMD_FIELD0		(0x0<<1)
+#define OCMD_FIELD1		(0x1<<1)
+#define OCMD_ENABLE		(0x1<<0)
+
+/* OCONFIG register */
+#define OCONF_PIPE_MASK		(0x1<<18)
+#define OCONF_PIPE_A		(0x0<<18)
+#define OCONF_PIPE_B		(0x1<<18)
+#define OCONF_GAMMA2_ENABLE	(0x1<<16)
+#define OCONF_CSC_MODE_BT601	(0x0<<5)
+#define OCONF_CSC_MODE_BT709	(0x1<<5)
+#define OCONF_CSC_BYPASS	(0x1<<4)
+#define OCONF_CC_OUT_8BIT	(0x1<<3)
+#define OCONF_TEST_MODE		(0x1<<2)
+#define OCONF_THREE_LINE_BUFFER	(0x1<<0)
+#define OCONF_TWO_LINE_BUFFER	(0x0<<0)
+
+/* DCLRKM (dst-key) register */
+#define DST_KEY_ENABLE		(0x1<<31)
+#define CLK_RGB24_MASK		0x0
+#define CLK_RGB16_MASK		0x070307
+#define CLK_RGB15_MASK		0x070707
+#define CLK_RGB8I_MASK		0xffffff
+
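+/* Expand a 16/15bpp framebuffer colorkey into the 8:8:8 layout the
+ * overlay compares against: each channel is shifted so its bits land in
+ * the high end of the corresponding byte, e.g. RGB16 0xf800 (pure red)
+ * becomes 0xf80000. */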
+#define RGB16_TO_COLORKEY(c) \
+	(((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3))
+#define RGB15_TO_COLORKEY(c) \
+	(((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3))
+
+/* overlay flip addr flag */
+#define OFC_UPDATE		0x1
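+/* Setting OFC_UPDATE in the flip address asks the hardware to also
+ * reload the register block, including the polyphase filter
+ * coefficients, on that flip (see intel_overlay_continue()). */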
+
+/* polyphase filter coefficients */
+#define N_HORIZ_Y_TAPS          5
+#define N_VERT_Y_TAPS           3
+#define N_HORIZ_UV_TAPS         3
+#define N_VERT_UV_TAPS          3
+#define N_PHASES                17
+#define MAX_TAPS                5
+
+/* memory-buffered overlay registers */
+struct overlay_registers {
+    u32 OBUF_0Y;
+    u32 OBUF_1Y;
+    u32 OBUF_0U;
+    u32 OBUF_0V;
+    u32 OBUF_1U;
+    u32 OBUF_1V;
+    u32 OSTRIDE;
+    u32 YRGB_VPH;
+    u32 UV_VPH;
+    u32 HORZ_PH;
+    u32 INIT_PHS;
+    u32 DWINPOS;
+    u32 DWINSZ;
+    u32 SWIDTH;
+    u32 SWIDTHSW;
+    u32 SHEIGHT;
+    u32 YRGBSCALE;
+    u32 UVSCALE;
+    u32 OCLRC0;
+    u32 OCLRC1;
+    u32 DCLRKV;
+    u32 DCLRKM;
+    u32 SCLRKVH;
+    u32 SCLRKVL;
+    u32 SCLRKEN;
+    u32 OCONFIG;
+    u32 OCMD;
+    u32 RESERVED1; /* 0x6C */
+    u32 OSTART_0Y;
+    u32 OSTART_1Y;
+    u32 OSTART_0U;
+    u32 OSTART_0V;
+    u32 OSTART_1U;
+    u32 OSTART_1V;
+    u32 OTILEOFF_0Y;
+    u32 OTILEOFF_1Y;
+    u32 OTILEOFF_0U;
+    u32 OTILEOFF_0V;
+    u32 OTILEOFF_1U;
+    u32 OTILEOFF_1V;
+    u32 FASTHSCALE; /* 0xA0 */
+    u32 UVSCALEV; /* 0xA4 */
+    u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
+    u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
+    u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
+    u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
+    u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
+    u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
+    u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
+    u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
+    u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
+};
+
+#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
+#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev))
+
+
+static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
+{
+	drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+	struct overlay_registers *regs;
+
+	/* no recursive mappings */
+	BUG_ON(overlay->virt_addr);
+
+	if (OVERLAY_NONPHYSICAL(overlay->dev)) {
+		regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+				overlay->reg_bo->gtt_offset);
+
+		if (!regs) {
+			DRM_ERROR("failed to map overlay regs in GTT\n");
+			return NULL;
+		}
+	} else {
+		regs = overlay->reg_bo->phys_obj->handle->vaddr;
+	}
+
+	return overlay->virt_addr = regs;
+}
+
+static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
+{
+	struct drm_device *dev = overlay->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (OVERLAY_NONPHYSICAL(overlay->dev))
+		io_mapping_unmap_atomic(overlay->virt_addr);
+
+	overlay->virt_addr = NULL;
+
+	I915_READ(OVADD); /* flush wc caches */
+}
+
+/* overlay needs to be disabled in OCMD reg */
+static int intel_overlay_on(struct intel_overlay *overlay)
+{
+	struct drm_device *dev = overlay->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+	RING_LOCALS;
+
+	BUG_ON(overlay->active);
+
+	overlay->active = 1;
+	overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
+
+	BEGIN_LP_RING(6);
+	OUT_RING(MI_FLUSH);
+	OUT_RING(MI_NOOP);
+	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+	OUT_RING(overlay->flip_addr | OFC_UPDATE);
+	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	OUT_RING(MI_NOOP);
+	ADVANCE_LP_RING();
+
+	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	if (overlay->last_flip_req == 0)
+		return -ENOMEM;
+
+	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+	if (ret != 0)
+		return ret;
+
+	overlay->hw_wedged = 0;
+	overlay->last_flip_req = 0;
+	return 0;
+}
+
+/* overlay needs to be enabled in OCMD reg */
+static void intel_overlay_continue(struct intel_overlay *overlay,
+			    bool load_polyphase_filter)
+{
+	struct drm_device *dev = overlay->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 flip_addr = overlay->flip_addr;
+	u32 tmp;
+	RING_LOCALS;
+
+	BUG_ON(!overlay->active);
+
+	if (load_polyphase_filter)
+		flip_addr |= OFC_UPDATE;
+
+	/* check for underruns */
+	tmp = I915_READ(DOVSTA);
+	if (tmp & (1 << 17))
+		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
+
+	BEGIN_LP_RING(4);
+	OUT_RING(MI_FLUSH);
+	OUT_RING(MI_NOOP);
+	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	OUT_RING(flip_addr);
+	ADVANCE_LP_RING();
+
+	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+}
+
+static int intel_overlay_wait_flip(struct intel_overlay *overlay)
+{
+	struct drm_device *dev = overlay->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+	u32 tmp;
+	RING_LOCALS;
+
+	if (overlay->last_flip_req != 0) {
+		ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+		if (ret == 0) {
+			overlay->last_flip_req = 0;
+
+			tmp = I915_READ(ISR);
+
+			if (!(tmp & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT))
+				return 0;
+		}
+	}
+
+	/* synchronous slowpath */
+	overlay->hw_wedged = RELEASE_OLD_VID;
+
+	BEGIN_LP_RING(2);
+	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	OUT_RING(MI_NOOP);
+	ADVANCE_LP_RING();
+
+	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	if (overlay->last_flip_req == 0)
+		return -ENOMEM;
+
+	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+	if (ret != 0)
+		return ret;
+
+	overlay->hw_wedged = 0;
+	overlay->last_flip_req = 0;
+	return 0;
+}
+
+/* overlay needs to be disabled in OCMD reg */
+static int intel_overlay_off(struct intel_overlay *overlay)
+{
+	u32 flip_addr = overlay->flip_addr;
+	struct drm_device *dev = overlay->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+	RING_LOCALS;
+
+	BUG_ON(!overlay->active);
+
+	/* According to intel docs the overlay hw may hang (when switching
+	 * off) without loading the filter coeffs. It is however unclear whether
+	 * this applies to the disabling of the overlay or to the switching off
+	 * of the hw. Do it in both cases */
+	flip_addr |= OFC_UPDATE;
+
+	/* wait for overlay to go idle */
+	overlay->hw_wedged = SWITCH_OFF_STAGE_1;
+
+	BEGIN_LP_RING(6);
+	OUT_RING(MI_FLUSH);
+	OUT_RING(MI_NOOP);
+	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	OUT_RING(flip_addr);
+	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	OUT_RING(MI_NOOP);
+	ADVANCE_LP_RING();
+
+	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	if (overlay->last_flip_req == 0)
+		return -ENOMEM;
+
+	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+	if (ret != 0)
+		return ret;
+
+	/* turn overlay off */
+	overlay->hw_wedged = SWITCH_OFF_STAGE_2;
+
+	BEGIN_LP_RING(6);
+	OUT_RING(MI_FLUSH);
+	OUT_RING(MI_NOOP);
+	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+	OUT_RING(flip_addr);
+	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	OUT_RING(MI_NOOP);
+	ADVANCE_LP_RING();
+
+	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	if (overlay->last_flip_req == 0)
+		return -ENOMEM;
+
+	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+	if (ret != 0)
+		return ret;
+
+	overlay->hw_wedged = 0;
+	overlay->last_flip_req = 0;
+	return ret;
+}
+
+static void intel_overlay_off_tail(struct intel_overlay *overlay)
+{
+	struct drm_gem_object *obj;
+
+	/* never have the overlay hw on without showing a frame */
+	BUG_ON(!overlay->vid_bo);
+	obj = overlay->vid_bo->obj;
+
+	i915_gem_object_unpin(obj);
+	drm_gem_object_unreference(obj);
+	overlay->vid_bo = NULL;
+
+	overlay->crtc->overlay = NULL;
+	overlay->crtc = NULL;
+	overlay->active = 0;
+}
+
+/* Recover from an interruption due to a signal.
+ * We have to be careful not to repeat work forever and to make forward
+ * progress. */
+int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
+					 int interruptible)
+{
+	struct drm_device *dev = overlay->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	u32 flip_addr;
+	int ret;
+	RING_LOCALS;
+
+	if (overlay->hw_wedged == HW_WEDGED)
+		return -EIO;
+
+	if (overlay->last_flip_req == 0) {
+		overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+		if (overlay->last_flip_req == 0)
+			return -ENOMEM;
+	}
+
+	ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible);
+	if (ret != 0)
+		return ret;
+
+	switch (overlay->hw_wedged) {
+		case RELEASE_OLD_VID:
+			obj = overlay->old_vid_bo->obj;
+			i915_gem_object_unpin(obj);
+			drm_gem_object_unreference(obj);
+			overlay->old_vid_bo = NULL;
+			break;
+		case SWITCH_OFF_STAGE_1:
+			flip_addr = overlay->flip_addr;
+			flip_addr |= OFC_UPDATE;
+
+			overlay->hw_wedged = SWITCH_OFF_STAGE_2;
+
+			BEGIN_LP_RING(6);
+			OUT_RING(MI_FLUSH);
+			OUT_RING(MI_NOOP);
+			OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+			OUT_RING(flip_addr);
+			OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+			OUT_RING(MI_NOOP);
+			ADVANCE_LP_RING();
+
+			overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+			if (overlay->last_flip_req == 0)
+				return -ENOMEM;
+
+			ret = i915_do_wait_request(dev, overlay->last_flip_req,
+					interruptible);
+			if (ret != 0)
+				return ret;
+
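+			/* deliberately fall through to stage 2 */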
+		case SWITCH_OFF_STAGE_2:
+			intel_overlay_off_tail(overlay);
+			break;
+		default:
+			BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP);
+	}
+
+	overlay->hw_wedged = 0;
+	overlay->last_flip_req = 0;
+	return 0;
+}
+
+/* Wait for pending overlay flip and release old frame.
+ * Needs to be called before the overlay registers are changed
+ * via intel_overlay_(un)map_regs_atomic */
+static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
+{
+	int ret;
+	struct drm_gem_object *obj;
+
+	/* only wait if there is actually an old frame to release to
+	 * guarantee forward progress */
+	if (!overlay->old_vid_bo)
+		return 0;
+
+	ret = intel_overlay_wait_flip(overlay);
+	if (ret != 0)
+		return ret;
+
+	obj = overlay->old_vid_bo->obj;
+	i915_gem_object_unpin(obj);
+	drm_gem_object_unreference(obj);
+	overlay->old_vid_bo = NULL;
+
+	return 0;
+}
+
+struct put_image_params {
+	int format;
+	short dst_x;
+	short dst_y;
+	short dst_w;
+	short dst_h;
+	short src_w;
+	short src_scan_h;
+	short src_scan_w;
+	short src_h;
+	short stride_Y;
+	short stride_UV;
+	int offset_Y;
+	int offset_U;
+	int offset_V;
+};
+
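+/* Packed YUV 4:2:2 stores two pixels in four bytes (one Y sample per
+ * pixel plus a shared U and V), hence the macropixel depth of 4 below;
+ * packed 4:1:1 would need 6 bytes but is not implemented. */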
+static int packed_depth_bytes(u32 format)
+{
+	switch (format & I915_OVERLAY_DEPTH_MASK) {
+		case I915_OVERLAY_YUV422:
+			return 4;
+		case I915_OVERLAY_YUV411:
+			/* return 6; not implemented */
+		default:
+			return -EINVAL;
+	}
+}
+
+static int packed_width_bytes(u32 format, short width)
+{
+	switch (format & I915_OVERLAY_DEPTH_MASK) {
+		case I915_OVERLAY_YUV422:
+			return width << 1;
+		default:
+			return -EINVAL;
+	}
+}
+
+static int uv_hsubsampling(u32 format)
+{
+	switch (format & I915_OVERLAY_DEPTH_MASK) {
+		case I915_OVERLAY_YUV422:
+		case I915_OVERLAY_YUV420:
+			return 2;
+		case I915_OVERLAY_YUV411:
+		case I915_OVERLAY_YUV410:
+			return 4;
+		default:
+			return -EINVAL;
+	}
+}
+
+static int uv_vsubsampling(u32 format)
+{
+	switch (format & I915_OVERLAY_DEPTH_MASK) {
+		case I915_OVERLAY_YUV420:
+		case I915_OVERLAY_YUV410:
+			return 2;
+		case I915_OVERLAY_YUV422:
+		case I915_OVERLAY_YUV411:
+			return 1;
+		default:
+			return -EINVAL;
+	}
+}
+
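+/* Compute the SWIDTHSW field: the number of memory units (64 bytes on
+ * i9xx, 32 bytes on older parts) spanned by one source line, derived from
+ * the surface offset and width and packed into the encoding the overlay
+ * hardware expects. */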
+static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
+{
+	u32 mask, shift, ret;
+	if (IS_I9XX(dev)) {
+		mask = 0x3f;
+		shift = 6;
+	} else {
+		mask = 0x1f;
+		shift = 5;
+	}
+	ret = ((offset + width + mask) >> shift) - (offset >> shift);
+	if (IS_I9XX(dev))
+		ret <<= 1;
+	ret -= 1;
+	return ret << 2;
+}
+
+static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
+	0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0,
+	0x3000, 0xb500, 0x19d0, 0x1880, 0xb440,
+	0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0,
+	0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380,
+	0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320,
+	0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0,
+	0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260,
+	0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200,
+	0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0,
+	0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160,
+	0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120,
+	0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0,
+	0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0,
+	0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
+	0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
+	0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
+	0xb000, 0x3000, 0x0800, 0x3000, 0xb000};
+static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
+	0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
+	0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
+	0xb040, 0x1b20, 0x29e0, 0xb060, 0x1bd8, 0x2880,
+	0xb080, 0x1c88, 0x3e60, 0xb0a0, 0x1d28, 0x3c00,
+	0xb0c0, 0x1db8, 0x39e0, 0xb0e0, 0x1e40, 0x37e0,
+	0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
+	0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
+	0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
+	0x3000, 0x0800, 0x3000};
+
+static void update_polyphase_filter(struct overlay_registers *regs)
+{
+	memcpy(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
+	memcpy(regs->UV_HCOEFS, uv_static_hcoeffs, sizeof(uv_static_hcoeffs));
+}
+
+static bool update_scaling_factors(struct intel_overlay *overlay,
+				   struct overlay_registers *regs,
+				   struct put_image_params *params)
+{
+	/* fixed point with a 12 bit shift */
+	u32 xscale, yscale, xscale_UV, yscale_UV;
+#define FP_SHIFT 12
+#define FRACT_MASK 0xfff
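+	/* e.g. src_scan_w = 1281, dst_w = 640 gives
+	 * xscale = (1280 << 12) / 640 = 0x2000, i.e. 2.0 in 4.12 fixed point */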
+	bool scale_changed = false;
+	int uv_hscale = uv_hsubsampling(params->format);
+	int uv_vscale = uv_vsubsampling(params->format);
+
+	if (params->dst_w > 1)
+		xscale = ((params->src_scan_w - 1) << FP_SHIFT)
+			/(params->dst_w);
+	else
+		xscale = 1 << FP_SHIFT;
+
+	if (params->dst_h > 1)
+		yscale = ((params->src_scan_h - 1) << FP_SHIFT)
+			/(params->dst_h);
+	else
+		yscale = 1 << FP_SHIFT;
+
+	/*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
+		xscale_UV = xscale/uv_hscale;
+		yscale_UV = yscale/uv_vscale;
+		/* make the Y scale to UV scale ratio an exact multiply */
+		xscale = xscale_UV * uv_hscale;
+		yscale = yscale_UV * uv_vscale;
+	/*} else {
+		xscale_UV = 0;
+		yscale_UV = 0;
+	}*/
+
+	if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
+		scale_changed = true;
+	overlay->old_xscale = xscale;
+	overlay->old_yscale = yscale;
+
+	regs->YRGBSCALE = ((yscale & FRACT_MASK) << 20)
+		| ((xscale >> FP_SHIFT) << 16)
+		| ((xscale & FRACT_MASK) << 3);
+	regs->UVSCALE = ((yscale_UV & FRACT_MASK) << 20)
+		| ((xscale_UV >> FP_SHIFT) << 16)
+		| ((xscale_UV & FRACT_MASK) << 3);
+	regs->UVSCALEV = ((yscale >> FP_SHIFT) << 16)
+		| ((yscale_UV >> FP_SHIFT) << 0);
+
+	if (scale_changed)
+		update_polyphase_filter(regs);
+
+	return scale_changed;
+}
+
+static void update_colorkey(struct intel_overlay *overlay,
+			    struct overlay_registers *regs)
+{
+	u32 key = overlay->color_key;
+	switch (overlay->crtc->base.fb->bits_per_pixel) {
+		case 8:
+			regs->DCLRKV = 0;
+			regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
+			break;
+		case 16:
+			if (overlay->crtc->base.fb->depth == 15) {
+				regs->DCLRKV = RGB15_TO_COLORKEY(key);
+				regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
+			} else {
+				regs->DCLRKV = RGB16_TO_COLORKEY(key);
+				regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
+			}
+			break;
+		case 24:
+		case 32:
+			regs->DCLRKV = key;
+			regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
+			break;
+	}
+}
+
+static u32 overlay_cmd_reg(struct put_image_params *params)
+{
+	u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0;
+
+	if (params->format & I915_OVERLAY_YUV_PLANAR) {
+		switch (params->format & I915_OVERLAY_DEPTH_MASK) {
+			case I915_OVERLAY_YUV422:
+				cmd |= OCMD_YUV_422_PLANAR;
+				break;
+			case I915_OVERLAY_YUV420:
+				cmd |= OCMD_YUV_420_PLANAR;
+				break;
+			case I915_OVERLAY_YUV411:
+			case I915_OVERLAY_YUV410:
+				cmd |= OCMD_YUV_410_PLANAR;
+				break;
+		}
+	} else { /* YUV packed */
+		switch (params->format & I915_OVERLAY_DEPTH_MASK) {
+			case I915_OVERLAY_YUV422:
+				cmd |= OCMD_YUV_422_PACKED;
+				break;
+			case I915_OVERLAY_YUV411:
+				cmd |= OCMD_YUV_411_PACKED;
+				break;
+		}
+
+		switch (params->format & I915_OVERLAY_SWAP_MASK) {
+			case I915_OVERLAY_NO_SWAP:
+				break;
+			case I915_OVERLAY_UV_SWAP:
+				cmd |= OCMD_UV_SWAP;
+				break;
+			case I915_OVERLAY_Y_SWAP:
+				cmd |= OCMD_Y_SWAP;
+				break;
+			case I915_OVERLAY_Y_AND_UV_SWAP:
+				cmd |= OCMD_Y_AND_UV_SWAP;
+				break;
+		}
+	}
+
+	return cmd;
+}
+
+int intel_overlay_do_put_image(struct intel_overlay *overlay,
+			       struct drm_gem_object *new_bo,
+			       struct put_image_params *params)
+{
+	int ret, tmp_width;
+	struct overlay_registers *regs;
+	bool scale_changed = false;
+	struct drm_i915_gem_object *bo_priv = new_bo->driver_private;
+	struct drm_device *dev;
+
+	/* check overlay before dereferencing it for dev */
+	BUG_ON(!overlay);
+	dev = overlay->dev;
+
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+	BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
+	ret = intel_overlay_release_old_vid(overlay);
+	if (ret != 0)
+		return ret;
+
+	ret = i915_gem_object_pin(new_bo, PAGE_SIZE);
+	if (ret != 0)
+		return ret;
+
+	ret = i915_gem_object_set_to_gtt_domain(new_bo, 0);
+	if (ret != 0)
+		goto out_unpin;
+
+	if (!overlay->active) {
+		regs = intel_overlay_map_regs_atomic(overlay);
+		if (!regs) {
+			ret = -ENOMEM;
+			goto out_unpin;
+		}
+		regs->OCONFIG = OCONF_CC_OUT_8BIT;
+		if (IS_I965GM(overlay->dev))
+			regs->OCONFIG |= OCONF_CSC_MODE_BT709;
+		regs->OCONFIG |= overlay->crtc->pipe == 0 ?
+			OCONF_PIPE_A : OCONF_PIPE_B;
+		intel_overlay_unmap_regs_atomic(overlay);
+
+		ret = intel_overlay_on(overlay);
+		if (ret != 0)
+			goto out_unpin;
+	}
+
+	regs = intel_overlay_map_regs_atomic(overlay);
+	if (!regs) {
+		ret = -ENOMEM;
+		goto out_unpin;
+	}
+
+	regs->DWINPOS = (params->dst_y << 16) | params->dst_x;
+	regs->DWINSZ = (params->dst_h << 16) | params->dst_w;
+
+	if (params->format & I915_OVERLAY_YUV_PACKED)
+		tmp_width = packed_width_bytes(params->format, params->src_w);
+	else
+		tmp_width = params->src_w;
+
+	regs->SWIDTH = params->src_w;
+	regs->SWIDTHSW = calc_swidthsw(overlay->dev,
+			params->offset_Y, tmp_width);
+	regs->SHEIGHT = params->src_h;
+	regs->OBUF_0Y = bo_priv->gtt_offset + params->offset_Y;
+	regs->OSTRIDE = params->stride_Y;
+
+	if (params->format & I915_OVERLAY_YUV_PLANAR) {
+		int uv_hscale = uv_hsubsampling(params->format);
+		int uv_vscale = uv_vsubsampling(params->format);
+		u32 tmp_U, tmp_V;
+		regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
+		tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
+				params->src_w/uv_hscale);
+		tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
+				params->src_w/uv_hscale);
+		regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
+		regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
+		regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
+		regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V;
+		regs->OSTRIDE |= params->stride_UV << 16;
+	}
+
+	scale_changed = update_scaling_factors(overlay, regs, params);
+
+	update_colorkey(overlay, regs);
+
+	regs->OCMD = overlay_cmd_reg(params);
+
+	intel_overlay_unmap_regs_atomic(overlay);
+
+	intel_overlay_continue(overlay, scale_changed);
+
+	overlay->old_vid_bo = overlay->vid_bo;
+	overlay->vid_bo = new_bo->driver_private;
+
+	return 0;
+
+out_unpin:
+	i915_gem_object_unpin(new_bo);
+	return ret;
+}
+
+int intel_overlay_switch_off(struct intel_overlay *overlay)
+{
+	int ret;
+	struct overlay_registers *regs;
+	struct drm_device *dev = overlay->dev;
+
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+	BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
+	if (overlay->hw_wedged) {
+		ret = intel_overlay_recover_from_interrupt(overlay, 1);
+		if (ret != 0)
+			return ret;
+	}
+
+	if (!overlay->active)
+		return 0;
+
+	ret = intel_overlay_release_old_vid(overlay);
+	if (ret != 0)
+		return ret;
+
+	regs = intel_overlay_map_regs_atomic(overlay);
+	regs->OCMD = 0;
+	intel_overlay_unmap_regs_atomic(overlay);
+
+	ret = intel_overlay_off(overlay);
+	if (ret != 0)
+		return ret;
+
+	intel_overlay_off_tail(overlay);
+
+	return 0;
+}
+
+static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
+					  struct intel_crtc *crtc)
+{
+	drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+	u32 pipeconf;
+	int pipeconf_reg = (crtc->pipe == 0) ? PIPEACONF : PIPEBCONF;
+
+	if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON)
+		return -EINVAL;
+
+	pipeconf = I915_READ(pipeconf_reg);
+
+	/* can't use the overlay with double wide pipe */
+	if (!IS_I965G(overlay->dev) && pipeconf & PIPEACONF_DOUBLE_WIDE)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
+{
+	struct drm_device *dev = overlay->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 ratio;
+	u32 pfit_control = I915_READ(PFIT_CONTROL);
+
+	/* XXX: This is not the same logic as in the xorg driver, but more in
+	 * line with the intel documentation for the i965 */
+	if (!IS_I965G(dev) && (pfit_control & VERT_AUTO_SCALE)) {
+		ratio = I915_READ(PFIT_AUTO_RATIOS) >> PFIT_VERT_SCALE_SHIFT;
+	} else { /* on i965 use the PGM reg to read out the autoscaler values */
+		ratio = I915_READ(PFIT_PGM_RATIOS);
+		if (IS_I965G(dev))
+			ratio >>= PFIT_VERT_SCALE_SHIFT_965;
+		else
+			ratio >>= PFIT_VERT_SCALE_SHIFT;
+	}
+
+	overlay->pfit_vscale_ratio = ratio;
+}
+
+static int check_overlay_dst(struct intel_overlay *overlay,
+			     struct drm_intel_overlay_put_image *rec)
+{
+	struct drm_display_mode *mode = &overlay->crtc->base.mode;
+
+	if ((rec->dst_x < mode->crtc_hdisplay)
+	    && (rec->dst_x + rec->dst_width
+		    <= mode->crtc_hdisplay)
+	    && (rec->dst_y < mode->crtc_vdisplay)
+	    && (rec->dst_y + rec->dst_height
+		    <= mode->crtc_vdisplay))
+		return 0;
+	else
+		return -EINVAL;
+}
+
+static int check_overlay_scaling(struct put_image_params *rec)
+{
+	u32 tmp;
+
+	/* downscaling limit is 8.0 */
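+	/* e.g. src_scan_h = 1024, dst_h = 120 gives an integer ratio of 8,
+	 * which exceeds 7 and is rejected */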
+	tmp = ((rec->src_scan_h << 16) / rec->dst_h) >> 16;
+	if (tmp > 7)
+		return -EINVAL;
+	tmp = ((rec->src_scan_w << 16) / rec->dst_w) >> 16;
+	if (tmp > 7)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int check_overlay_src(struct drm_device *dev,
+			     struct drm_intel_overlay_put_image *rec,
+			     struct drm_gem_object *new_bo)
+{
+	u32 stride_mask;
+	int depth;
+	int uv_hscale = uv_hsubsampling(rec->flags);
+	int uv_vscale = uv_vsubsampling(rec->flags);
+	size_t tmp;
+
+	/* check src dimensions */
+	if (IS_845G(dev) || IS_I830(dev)) {
+		if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY
+		    || rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
+			return -EINVAL;
+	} else {
+		if (rec->src_height > IMAGE_MAX_HEIGHT
+		    || rec->src_width > IMAGE_MAX_WIDTH)
+			return -EINVAL;
+	}
+	/* better safe than sorry, use 4 as the maximal subsampling ratio */
+	if (rec->src_height < N_VERT_Y_TAPS*4
+	    || rec->src_width < N_HORIZ_Y_TAPS*4)
+		return -EINVAL;
+
+	/* check alignment constraints */
+	switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+		case I915_OVERLAY_RGB:
+			/* not implemented */
+			return -EINVAL;
+		case I915_OVERLAY_YUV_PACKED:
+			depth = packed_depth_bytes(rec->flags);
+			if (uv_vscale != 1)
+				return -EINVAL;
+			if (depth < 0)
+				return depth;
+			/* ignore UV planes */
+			rec->stride_UV = 0;
+			rec->offset_U = 0;
+			rec->offset_V = 0;
+			/* check pixel alignment */
+			if (rec->offset_Y % depth)
+				return -EINVAL;
+			break;
+		case I915_OVERLAY_YUV_PLANAR:
+			if (uv_vscale < 0 || uv_hscale < 0)
+				return -EINVAL;
+			/* no offset restrictions for planar formats */
+			break;
+		default:
+			return -EINVAL;
+	}
+
+	if (rec->src_width % uv_hscale)
+		return -EINVAL;
+
+	/* stride checking */
+	stride_mask = 63;
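+	/* i.e. strides must be 64-byte aligned */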
+
+	if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
+		return -EINVAL;
+	if (IS_I965G(dev) && rec->stride_Y < 512)
+		return -EINVAL;
+
+	tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
+		4 : 8;
+	if (rec->stride_Y > tmp*1024 || rec->stride_UV > 2*1024)
+		return -EINVAL;
+
+	/* check buffer dimensions */
+	switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+		case I915_OVERLAY_RGB:
+		case I915_OVERLAY_YUV_PACKED:
+			/* stride must cover the full packed source width in bytes */
+			if (packed_width_bytes(rec->flags, rec->src_width)
+					> rec->stride_Y)
+				return -EINVAL;
+
+			tmp = rec->stride_Y*rec->src_height;
+			if (rec->offset_Y + tmp > new_bo->size)
+				return -EINVAL;
+			break;
+		case I915_OVERLAY_YUV_PLANAR:
+			if (rec->src_width > rec->stride_Y)
+				return -EINVAL;
+			if (rec->src_width/uv_hscale > rec->stride_UV)
+				return -EINVAL;
+
+			tmp = rec->stride_Y*rec->src_height;
+			if (rec->offset_Y + tmp > new_bo->size)
+				return -EINVAL;
+			tmp = rec->stride_UV*rec->src_height;
+			tmp /= uv_vscale;
+			if (rec->offset_U + tmp > new_bo->size
+			    || rec->offset_V + tmp > new_bo->size)
+				return -EINVAL;
+			break;
+	}
+
+	return 0;
+}
+
+int intel_overlay_put_image(struct drm_device *dev, void *data,
+                            struct drm_file *file_priv)
+{
+	struct drm_intel_overlay_put_image *put_image_rec = data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_overlay *overlay;
+	struct drm_mode_object *drmmode_obj;
+	struct intel_crtc *crtc;
+	struct drm_gem_object *new_bo;
+	struct put_image_params *params;
+	int ret;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	overlay = dev_priv->overlay;
+	if (!overlay) {
+		DRM_DEBUG("userspace bug: no overlay\n");
+		return -ENODEV;
+	}
+
+	if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) {
+		mutex_lock(&dev->mode_config.mutex);
+		mutex_lock(&dev->struct_mutex);
+
+		ret = intel_overlay_switch_off(overlay);
+
+		mutex_unlock(&dev->struct_mutex);
+		mutex_unlock(&dev->mode_config.mutex);
+
+		return ret;
+	}
+
+	params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL);
+	if (!params)
+		return -ENOMEM;
+
+	drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
+			DRM_MODE_OBJECT_CRTC);
+	if (!drmmode_obj) {
+		kfree(params);
+		return -ENOENT;
+	}
+	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
+
+	new_bo = drm_gem_object_lookup(dev, file_priv,
+			put_image_rec->bo_handle);
+	if (!new_bo) {
+		kfree(params);
+		return -ENOENT;
+	}
+
+	mutex_lock(&dev->mode_config.mutex);
+	mutex_lock(&dev->struct_mutex);
+
+	if (overlay->hw_wedged) {
+		ret = intel_overlay_recover_from_interrupt(overlay, 1);
+		if (ret != 0)
+			goto out_unlock;
+	}
+
+	if (overlay->crtc != crtc) {
+		struct drm_display_mode *mode = &crtc->base.mode;
+		ret = intel_overlay_switch_off(overlay);
+		if (ret != 0)
+			goto out_unlock;
+
+		ret = check_overlay_possible_on_crtc(overlay, crtc);
+		if (ret != 0)
+			goto out_unlock;
+
+		overlay->crtc = crtc;
+		crtc->overlay = overlay;
+
+		if (intel_panel_fitter_pipe(dev) == crtc->pipe
+		    /* and the line is too wide, i.e. one-line-mode */
+		    && mode->hdisplay > 1024) {
+			overlay->pfit_active = 1;
+			update_pfit_vscale_ratio(overlay);
+		} else {
+			overlay->pfit_active = 0;
+		}
+	}
+
+	ret = check_overlay_dst(overlay, put_image_rec);
+	if (ret != 0)
+		goto out_unlock;
+
+	if (overlay->pfit_active) {
+		params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
+			overlay->pfit_vscale_ratio);
+		/* shifting right rounds downwards, so add 1 */
+		params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
+			overlay->pfit_vscale_ratio) + 1;
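+		/* e.g. a 4.12 ratio of 0x2000 (2.0) halves dst_y and dst_h */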
+	} else {
+		params->dst_y = put_image_rec->dst_y;
+		params->dst_h = put_image_rec->dst_height;
+	}
+	params->dst_x = put_image_rec->dst_x;
+	params->dst_w = put_image_rec->dst_width;
+
+	params->src_w = put_image_rec->src_width;
+	params->src_h = put_image_rec->src_height;
+	params->src_scan_w = put_image_rec->src_scan_width;
+	params->src_scan_h = put_image_rec->src_scan_height;
+	if (params->src_scan_h > params->src_h
+	    || params->src_scan_w > params->src_w) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	ret = check_overlay_src(dev, put_image_rec, new_bo);
+	if (ret != 0)
+		goto out_unlock;
+	params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
+	params->stride_Y = put_image_rec->stride_Y;
+	params->stride_UV = put_image_rec->stride_UV;
+	params->offset_Y = put_image_rec->offset_Y;
+	params->offset_U = put_image_rec->offset_U;
+	params->offset_V = put_image_rec->offset_V;
+
+	/* Check scaling after src size to prevent a divide-by-zero. */
+	ret = check_overlay_scaling(params);
+	if (ret != 0)
+		goto out_unlock;
+
+	ret = intel_overlay_do_put_image(overlay, new_bo, params);
+	if (ret != 0)
+		goto out_unlock;
+
+	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev->mode_config.mutex);
+
+	kfree(params);
+
+	return 0;
+
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev->mode_config.mutex);
+	drm_gem_object_unreference(new_bo);
+	kfree(params);
+
+	return ret;
+}
+
+static void update_reg_attrs(struct intel_overlay *overlay,
+			     struct overlay_registers *regs)
+{
+	regs->OCLRC0 = (overlay->contrast << 18) | (overlay->brightness & 0xff);
+	regs->OCLRC1 = overlay->saturation;
+}
+
+static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
+{
+	int i;
+
+	if (gamma1 & 0xff000000 || gamma2 & 0xff000000)
+		return false;
+
+	for (i = 0; i < 3; i++) {
+		if (((gamma1 >> i * 8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
+			return false;
+	}
+
+	return true;
+}
+
+static bool check_gamma5_errata(u32 gamma5)
+{
+	int i;
+
+	for (i = 0; i < 3; i++) {
+		if (((gamma5 >> i*8) & 0xff) == 0x80)
+			return false;
+	}
+
+	return true;
+}
+
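+/* The six gamma points must form a strictly increasing ramp in each
+ * byte-wide channel, bounded below by 0 and above by 0x00ffffff; gamma5
+ * additionally may not contain the value 0x80 in any channel (see
+ * check_gamma5_errata() above). */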
+static int check_gamma(struct drm_intel_overlay_attrs *attrs)
+{
+	if (!check_gamma_bounds(0, attrs->gamma0)
+	    || !check_gamma_bounds(attrs->gamma0, attrs->gamma1)
+	    || !check_gamma_bounds(attrs->gamma1, attrs->gamma2)
+	    || !check_gamma_bounds(attrs->gamma2, attrs->gamma3)
+	    || !check_gamma_bounds(attrs->gamma3, attrs->gamma4)
+	    || !check_gamma_bounds(attrs->gamma4, attrs->gamma5)
+	    || !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
+		return -EINVAL;
+	if (!check_gamma5_errata(attrs->gamma5))
+		return -EINVAL;
+	return 0;
+}
+
+int intel_overlay_attrs(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+	struct drm_intel_overlay_attrs *attrs = data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_overlay *overlay;
+	struct overlay_registers *regs;
+	int ret;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	overlay = dev_priv->overlay;
+	if (!overlay) {
+		DRM_DEBUG("userspace bug: no overlay\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&dev->mode_config.mutex);
+	mutex_lock(&dev->struct_mutex);
+
+	if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
+		attrs->color_key = overlay->color_key;
+		attrs->brightness = overlay->brightness;
+		attrs->contrast = overlay->contrast;
+		attrs->saturation = overlay->saturation;
+
+		if (IS_I9XX(dev)) {
+			attrs->gamma0 = I915_READ(OGAMC0);
+			attrs->gamma1 = I915_READ(OGAMC1);
+			attrs->gamma2 = I915_READ(OGAMC2);
+			attrs->gamma3 = I915_READ(OGAMC3);
+			attrs->gamma4 = I915_READ(OGAMC4);
+			attrs->gamma5 = I915_READ(OGAMC5);
+		}
+		ret = 0;
+	} else {
+		overlay->color_key = attrs->color_key;
+		if (attrs->brightness >= -128 && attrs->brightness <= 127) {
+			overlay->brightness = attrs->brightness;
+		} else {
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+		if (attrs->contrast <= 255) {
+			overlay->contrast = attrs->contrast;
+		} else {
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+		if (attrs->saturation <= 1023) {
+			overlay->saturation = attrs->saturation;
+		} else {
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+
+		regs = intel_overlay_map_regs_atomic(overlay);
+		if (!regs) {
+			ret = -ENOMEM;
+			goto out_unlock;
+		}
+
+		update_reg_attrs(overlay, regs);
+
+		intel_overlay_unmap_regs_atomic(overlay);
+
+		if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
+			if (!IS_I9XX(dev)) {
+				ret = -EINVAL;
+				goto out_unlock;
+			}
+
+			if (overlay->active) {
+				ret = -EBUSY;
+				goto out_unlock;
+			}
+
+			ret = check_gamma(attrs);
+			if (ret != 0)
+				goto out_unlock;
+
+			I915_WRITE(OGAMC0, attrs->gamma0);
+			I915_WRITE(OGAMC1, attrs->gamma1);
+			I915_WRITE(OGAMC2, attrs->gamma2);
+			I915_WRITE(OGAMC3, attrs->gamma3);
+			I915_WRITE(OGAMC4, attrs->gamma4);
+			I915_WRITE(OGAMC5, attrs->gamma5);
+		}
+		ret = 0;
+	}
+
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return ret;
+}
+
+void intel_setup_overlay(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_overlay *overlay;
+	struct drm_gem_object *reg_bo;
+	struct overlay_registers *regs;
+	int ret;
+
+	if (!OVERLAY_EXISTS(dev))
+		return;
+
+	overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
+	if (!overlay)
+		return;
+	overlay->dev = dev;
+
+	reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE);
+	if (!reg_bo)
+		goto out_free;
+	overlay->reg_bo = reg_bo->driver_private;
+
+	if (OVERLAY_NONPHYSICAL(dev)) {
+		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
+		if (ret) {
+                        DRM_ERROR("failed to pin overlay register bo\n");
+                        goto out_free_bo;
+                }
+		overlay->flip_addr = overlay->reg_bo->gtt_offset;
+	} else {
+		ret = i915_gem_attach_phys_object(dev, reg_bo,
+				I915_GEM_PHYS_OVERLAY_REGS);
+		if (ret) {
+			DRM_ERROR("failed to attach phys overlay regs\n");
+			goto out_free_bo;
+		}
+		overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
+	}
+
+	/* init all values */
+	overlay->color_key = 0x0101fe;
+	overlay->brightness = -19;
+	overlay->contrast = 75;
+	overlay->saturation = 146;
+
+	regs = intel_overlay_map_regs_atomic(overlay);
+	if (!regs)
+		goto out_free_bo;
+
+	memset(regs, 0, sizeof(struct overlay_registers));
+	update_polyphase_filter(regs);
+
+	update_reg_attrs(overlay, regs);
+
+	intel_overlay_unmap_regs_atomic(overlay);
+
+	dev_priv->overlay = overlay;
+	DRM_INFO("initialized overlay support\n");
+	return;
+
+out_free_bo:
+	drm_gem_object_unreference(reg_bo);
+out_free:
+	kfree(overlay);
+	return;
+}
+
+void intel_cleanup_overlay(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (dev_priv->overlay) {
+		/* The bo's should be free'd by the generic code already.
+		 * Furthermore modesetting teardown happens beforehand so the
+		 * hardware should be off already */
+		BUG_ON(dev_priv->overlay->active);
+
+		kfree(dev_priv->overlay);
+	}
+}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e7fa327..24a3dc9 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -36,8 +36,6 @@
 #include "i915_drv.h"
 #include "intel_sdvo_regs.h"
 
-#undef SDVO_DEBUG
-
 static char *tv_format_names[] = {
 	"NTSC_M"   , "NTSC_J"  , "NTSC_443",
 	"PAL_B"    , "PAL_D"   , "PAL_G"   ,
@@ -356,7 +354,6 @@
 #define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
 #define SDVO_PRIV(output)   ((struct intel_sdvo_priv *) (output)->dev_priv)
 
-#ifdef SDVO_DEBUG
 static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
 				   void *args, int args_len)
 {
@@ -379,9 +376,6 @@
 		DRM_LOG_KMS("(%02X)", cmd);
 	DRM_LOG_KMS("\n");
 }
-#else
-#define intel_sdvo_debug_write(o, c, a, l)
-#endif
 
 static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd,
 				 void *args, int args_len)
@@ -398,7 +392,6 @@
 	intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd);
 }
 
-#ifdef SDVO_DEBUG
 static const char *cmd_status_names[] = {
 	"Power on",
 	"Success",
@@ -427,9 +420,6 @@
 		DRM_LOG_KMS("(??? %d)", status);
 	DRM_LOG_KMS("\n");
 }
-#else
-#define intel_sdvo_debug_response(o, r, l, s)
-#endif
 
 static u8 intel_sdvo_read_response(struct intel_output *intel_output,
 				   void *response, int response_len)
@@ -1627,6 +1617,10 @@
 
 	intel_sdvo_write_cmd(intel_output,
 			     SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
+	if (sdvo_priv->is_tv) {
+		/* add 30ms delay when the output type is SDVO-TV */
+		mdelay(30);
+	}
 	status = intel_sdvo_read_response(intel_output, &response, 2);
 
 	DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 9ca9179..552ec11 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1213,20 +1213,17 @@
 		tv_ctl |= TV_TRILEVEL_SYNC;
 	if (tv_mode->pal_burst)
 		tv_ctl |= TV_PAL_BURST;
-	scctl1 = 0;
-	/* dda1 implies valid video levels */
-	if (tv_mode->dda1_inc) {
-		scctl1 |= TV_SC_DDA1_EN;
-	}
 
+	scctl1 = 0;
+	if (tv_mode->dda1_inc)
+		scctl1 |= TV_SC_DDA1_EN;
 	if (tv_mode->dda2_inc)
 		scctl1 |= TV_SC_DDA2_EN;
-
 	if (tv_mode->dda3_inc)
 		scctl1 |= TV_SC_DDA3_EN;
-
 	scctl1 |= tv_mode->sc_reset;
-	scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
+	if (video_levels)
+		scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
 	scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
 
 	scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
@@ -1416,16 +1413,16 @@
 	 *  0 0 0 Component
 	 */
 	if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
-		DRM_DEBUG("Detected Composite TV connection\n");
+		DRM_DEBUG_KMS("Detected Composite TV connection\n");
 		type = DRM_MODE_CONNECTOR_Composite;
 	} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
-		DRM_DEBUG("Detected S-Video TV connection\n");
+		DRM_DEBUG_KMS("Detected S-Video TV connection\n");
 		type = DRM_MODE_CONNECTOR_SVIDEO;
 	} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
-		DRM_DEBUG("Detected Component TV connection\n");
+		DRM_DEBUG_KMS("Detected Component TV connection\n");
 		type = DRM_MODE_CONNECTOR_Component;
 	} else {
-		DRM_DEBUG("No TV connection detected\n");
+		DRM_DEBUG_KMS("No TV connection detected\n");
 		type = -1;
 	}
 
@@ -1702,6 +1699,41 @@
 	.destroy = intel_tv_enc_destroy,
 };
 
+/*
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the integrated TV is present.
+ * If it is present, return 1; otherwise return 0.
+ * If no child dev is parsed from VBT, it assumes that the TV is present.
+ */
+static int tv_is_present_in_vbt(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct child_device_config *p_child;
+	int i, ret;
+
+	if (!dev_priv->child_dev_num)
+		return 1;
+
+	ret = 0;
+	for (i = 0; i < dev_priv->child_dev_num; i++) {
+		p_child = dev_priv->child_dev + i;
+		/*
+		 * If the device type is not TV, continue.
+		 */
+		if (p_child->device_type != DEVICE_TYPE_INT_TV &&
+			p_child->device_type != DEVICE_TYPE_TV)
+			continue;
+		/* Only when the addin_offset is non-zero, it is regarded
+		 * as present.
+		 */
+		if (p_child->addin_offset) {
+			ret = 1;
+			break;
+		}
+	}
+	return ret;
+}
 
 void
 intel_tv_init(struct drm_device *dev)
@@ -1717,6 +1749,10 @@
 	if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
 		return;
 
+	if (!tv_is_present_in_vbt(dev)) {
+		DRM_DEBUG_KMS("Integrated TV is not present.\n");
+		return;
+	}
 	/* Even if we have an encoder we may not have a connector */
 	if (!dev_priv->int_tv_support)
 		return;
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
new file mode 100644
index 0000000..d823e63
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -0,0 +1,44 @@
+config DRM_NOUVEAU
+	tristate "Nouveau (nVidia) cards"
+	depends on DRM
+	select FW_LOADER
+	select DRM_KMS_HELPER
+	select DRM_TTM
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	select FB
+	select FRAMEBUFFER_CONSOLE if !EMBEDDED
+	select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
+	help
+	  Choose this option for open-source nVidia support.
+
+config DRM_NOUVEAU_BACKLIGHT
+	bool "Support for backlight control"
+	depends on DRM_NOUVEAU
+	default y
+	help
+	  Say Y here if you want to control the backlight of your display
+	  (e.g. a laptop panel).
+
+config DRM_NOUVEAU_DEBUG
+	bool "Build in Nouveau's debugfs support"
+	depends on DRM_NOUVEAU && DEBUG_FS
+	default y
+	help
+	  Say Y here if you want Nouveau to output debugging information
+	  via debugfs.
+
+menu "I2C encoder or helper chips"
+	depends on DRM
+
+config DRM_I2C_CH7006
+	tristate "Chrontel ch7006 TV encoder"
+	default m if DRM_NOUVEAU
+	help
+	  Support for Chrontel ch7006 and similar TV encoders, found
+	  on some nVidia video cards.
+
+	  This driver is currently only useful if you're also using
+	  the nouveau driver.
+endmenu
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
new file mode 100644
index 0000000..1d90d4d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -0,0 +1,31 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
+             nouveau_object.o nouveau_irq.o nouveau_notifier.o \
+             nouveau_sgdma.o nouveau_dma.o \
+             nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
+             nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
+	     nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
+	     nouveau_dp.o \
+             nv04_timer.o \
+             nv04_mc.o nv40_mc.o nv50_mc.o \
+             nv04_fb.o nv10_fb.o nv40_fb.o \
+             nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
+             nv04_graph.o nv10_graph.o nv20_graph.o \
+             nv40_graph.o nv50_graph.o \
+             nv04_instmem.o nv50_instmem.o \
+             nv50_crtc.o nv50_dac.o nv50_sor.o \
+             nv50_cursor.o nv50_display.o nv50_fbcon.o \
+             nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
+             nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
+             nv17_gpio.o
+
+nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
+nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
+nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
+nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
+
+obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
new file mode 100644
index 0000000..1cf4882
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -0,0 +1,125 @@
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/acpi_bus.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_sarea.h"
+#include "drm_crtc_helper.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nv50_display.h"
+
+#define NOUVEAU_DSM_SUPPORTED 0x00
+#define NOUVEAU_DSM_SUPPORTED_FUNCTIONS 0x00
+
+#define NOUVEAU_DSM_ACTIVE 0x01
+#define NOUVEAU_DSM_ACTIVE_QUERY 0x00
+
+#define NOUVEAU_DSM_LED 0x02
+#define NOUVEAU_DSM_LED_STATE 0x00
+#define NOUVEAU_DSM_LED_OFF 0x10
+#define NOUVEAU_DSM_LED_STAMINA 0x11
+#define NOUVEAU_DSM_LED_SPEED 0x12
+
+#define NOUVEAU_DSM_POWER 0x03
+#define NOUVEAU_DSM_POWER_STATE 0x00
+#define NOUVEAU_DSM_POWER_SPEED 0x01
+#define NOUVEAU_DSM_POWER_STAMINA 0x02
+
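+/* Evaluate the _DSM (Device Specific Method) on the GPU's ACPI handle.
+ * _DSM takes four arguments: a 16-byte UUID selecting the interface, a
+ * revision ID (0x102 for this vendor interface), a function index and a
+ * function-specific argument; the result comes back as an integer or a
+ * small buffer. */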
+static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result)
+{
+	static char muid[] = {
+		0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
+		0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
+	};
+
+	struct pci_dev *pdev = dev->pdev;
+	struct acpi_handle *handle;
+	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+	struct acpi_object_list input;
+	union acpi_object params[4];
+	union acpi_object *obj;
+	int err;
+
+	handle = DEVICE_ACPI_HANDLE(&pdev->dev);
+
+	if (!handle)
+		return -ENODEV;
+
+	input.count = 4;
+	input.pointer = params;
+	params[0].type = ACPI_TYPE_BUFFER;
+	params[0].buffer.length = sizeof(muid);
+	params[0].buffer.pointer = (char *)muid;
+	params[1].type = ACPI_TYPE_INTEGER;
+	params[1].integer.value = 0x00000102;
+	params[2].type = ACPI_TYPE_INTEGER;
+	params[2].integer.value = func;
+	params[3].type = ACPI_TYPE_INTEGER;
+	params[3].integer.value = arg;
+
+	err = acpi_evaluate_object(handle, "_DSM", &input, &output);
+	if (err) {
+		NV_INFO(dev, "failed to evaluate _DSM: %d\n", err);
+		return err;
+	}
+
+	obj = (union acpi_object *)output.pointer;
+
+	if (obj->type == ACPI_TYPE_INTEGER &&
+	    obj->integer.value == 0x80000002) {
+		kfree(output.pointer);
+		return -ENODEV;
+	}
+
+	if (obj->type == ACPI_TYPE_BUFFER) {
+		if (obj->buffer.length == 4 && result) {
+			*result = 0;
+			*result |= obj->buffer.pointer[0];
+			*result |= (obj->buffer.pointer[1] << 8);
+			*result |= (obj->buffer.pointer[2] << 16);
+			*result |= (obj->buffer.pointer[3] << 24);
+		}
+	}
+
+	kfree(output.pointer);
+	return 0;
+}
+
+int nouveau_hybrid_setup(struct drm_device *dev)
+{
+	int result;
+
+	if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY,
+								&result))
+		return -ENODEV;
+
+	NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);
+
+	if (result & 0x1) {	/* Stamina mode - disable the external GPU */
+		nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
+									NULL);
+		nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
+									NULL);
+	} else {		/* Ensure that the external GPU is enabled */
+		nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
+		nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
+									NULL);
+	}
+
+	return 0;
+}
+
+bool nouveau_dsm_probe(struct drm_device *dev)
+{
+	int support = 0;
+
+	if (nouveau_dsm(dev, NOUVEAU_DSM_SUPPORTED,
+				NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &support))
+		return false;
+
+	if (!support)
+		return false;
+
+	return true;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
new file mode 100644
index 0000000..20564f8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2009 Red Hat <mjg@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ *  Matthew Garrett <mjg@redhat.com>
+ *
+ * Register locations derived from NVClock by Roderick Colenbrander
+ */
+
+#include <linux/backlight.h>
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
+
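+/*
+ * The NV40 backlight level appears to live in a small field in the high
+ * word of NV40_PMC_BACKLIGHT (hence max_brightness = 31 below); these
+ * helpers just shift it in and out of bits 20:16 and leave the rest of
+ * the register untouched.
+ */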
+static int nv40_get_intensity(struct backlight_device *bd)
+{
+	struct drm_device *dev = bl_get_data(bd);
+	int val = (nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)
+									>> 16;
+
+	return val;
+}
+
+static int nv40_set_intensity(struct backlight_device *bd)
+{
+	struct drm_device *dev = bl_get_data(bd);
+	int val = bd->props.brightness;
+	int reg = nv_rd32(dev, NV40_PMC_BACKLIGHT);
+
+	nv_wr32(dev, NV40_PMC_BACKLIGHT,
+		 (val << 16) | (reg & ~NV40_PMC_BACKLIGHT_MASK));
+
+	return 0;
+}
+
+static struct backlight_ops nv40_bl_ops = {
+	.options = BL_CORE_SUSPENDRESUME,
+	.get_brightness = nv40_get_intensity,
+	.update_status = nv40_set_intensity,
+};
+
+static int nv50_get_intensity(struct backlight_device *bd)
+{
+	struct drm_device *dev = bl_get_data(bd);
+
+	return nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT);
+}
+
+static int nv50_set_intensity(struct backlight_device *bd)
+{
+	struct drm_device *dev = bl_get_data(bd);
+	int val = bd->props.brightness;
+
+	nv_wr32(dev, NV50_PDISPLAY_SOR_BACKLIGHT,
+		val | NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE);
+	return 0;
+}
+
+static struct backlight_ops nv50_bl_ops = {
+	.options = BL_CORE_SUSPENDRESUME,
+	.get_brightness = nv50_get_intensity,
+	.update_status = nv50_set_intensity,
+};
+
+static int nouveau_nv40_backlight_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct backlight_device *bd;
+
+	if (!(nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
+		return 0;
+
+	bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev,
+				       &nv40_bl_ops);
+	if (IS_ERR(bd))
+		return PTR_ERR(bd);
+
+	dev_priv->backlight = bd;
+	bd->props.max_brightness = 31;
+	bd->props.brightness = nv40_get_intensity(bd);
+	backlight_update_status(bd);
+
+	return 0;
+}
+
+static int nouveau_nv50_backlight_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct backlight_device *bd;
+
+	if (!nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT))
+		return 0;
+
+	bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev,
+				       &nv50_bl_ops);
+	if (IS_ERR(bd))
+		return PTR_ERR(bd);
+
+	dev_priv->backlight = bd;
+	bd->props.max_brightness = 1025;
+	bd->props.brightness = nv50_get_intensity(bd);
+	backlight_update_status(bd);
+	return 0;
+}
+
+int nouveau_backlight_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	switch (dev_priv->card_type) {
+	case NV_40:
+		return nouveau_nv40_backlight_init(dev);
+	case NV_50:
+		return nouveau_nv50_backlight_init(dev);
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+void nouveau_backlight_exit(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->backlight) {
+		backlight_device_unregister(dev_priv->backlight);
+		dev_priv->backlight = NULL;
+	}
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
new file mode 100644
index 0000000..5eec5ed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -0,0 +1,6095 @@
+/*
+ * Copyright 2005-2006 Erik Waling
+ * Copyright 2006 Stephane Marchesin
+ * Copyright 2007-2009 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "drmP.h"
+#define NV_DEBUG_NOTRACE
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+
+/* these defines are made up */
+#define NV_CIO_CRE_44_HEADA 0x0
+#define NV_CIO_CRE_44_HEADB 0x3
+#define FEATURE_MOBILE 0x10	/* also FEATURE_QUADRO for BMP */
+#define LEGACY_I2C_CRT 0x80
+#define LEGACY_I2C_PANEL 0x81
+#define LEGACY_I2C_TV 0x82
+
+#define EDID1_LEN 128
+
+#define BIOSLOG(sip, fmt, arg...) NV_DEBUG(sip->dev, fmt, ##arg)
+#define LOG_OLD_VALUE(x)
+
+#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x))
+#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x))
+
+struct init_exec {
+	bool execute;
+	bool repeat;
+};
+
+static bool nv_cksum(const uint8_t *data, unsigned int length)
+{
+	/*
+	 * There are a few checksums in the BIOS, so here's a generic
+	 * checking function.
+	 */
+	int i;
+	uint8_t sum = 0;
+
+	for (i = 0; i < length; i++)
+		sum += data[i];
+
+	if (sum)
+		return true;
+
+	return false;
+}
+
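+/*
+ * Score a shadow candidate: 0 means no ROM signature at all, 1 a bad
+ * checksum on a read-only image (probably all rubbish), 2 a bad
+ * checksum on a writeable image (may still be usable), 3 an apparently
+ * valid image.  NVShadowVBIOS() below tries each method in turn and
+ * falls back through descending scores.
+ */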
+static int
+score_vbios(struct drm_device *dev, const uint8_t *data, const bool writeable)
+{
+	if (!(data[0] == 0x55 && data[1] == 0xAA)) {
+		NV_TRACEWARN(dev, "... BIOS signature not found\n");
+		return 0;
+	}
+
+	if (nv_cksum(data, data[2] * 512)) {
+		NV_TRACEWARN(dev, "... BIOS checksum invalid\n");
+		/* if a ro image is somewhat bad, it's probably all rubbish */
+		return writeable ? 2 : 1;
+	} else
+		NV_TRACE(dev, "... appears to be valid\n");
+
+	return 3;
+}
+
+static void load_vbios_prom(struct drm_device *dev, uint8_t *data)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t pci_nv_20, save_pci_nv_20;
+	int pcir_ptr;
+	int i;
+
+	if (dev_priv->card_type >= NV_50)
+		pci_nv_20 = 0x88050;
+	else
+		pci_nv_20 = NV_PBUS_PCI_NV_20;
+
+	/* enable ROM access */
+	save_pci_nv_20 = nvReadMC(dev, pci_nv_20);
+	nvWriteMC(dev, pci_nv_20,
+		  save_pci_nv_20 & ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
+
+	/* bail if no rom signature */
+	if (nv_rd08(dev, NV_PROM_OFFSET) != 0x55 ||
+	    nv_rd08(dev, NV_PROM_OFFSET + 1) != 0xaa)
+		goto out;
+
+	/* additional check (see note below) - read PCI record header */
+	pcir_ptr = nv_rd08(dev, NV_PROM_OFFSET + 0x18) |
+		   nv_rd08(dev, NV_PROM_OFFSET + 0x19) << 8;
+	if (nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr) != 'P' ||
+	    nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 1) != 'C' ||
+	    nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 2) != 'I' ||
+	    nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 3) != 'R')
+		goto out;
+
+	/* on some 6600GT/6800LE prom reads are messed up.  nvclock alleges
+	 * a good read may be obtained by waiting or re-reading (cargocult: 5x)
+	 * each byte.  we'll hope pramin has something usable instead
+	 */
+	for (i = 0; i < NV_PROM_SIZE; i++)
+		data[i] = nv_rd08(dev, NV_PROM_OFFSET + i);
+
+out:
+	/* disable ROM access */
+	nvWriteMC(dev, pci_nv_20,
+		  save_pci_nv_20 | NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
+}
+
+static void load_vbios_pramin(struct drm_device *dev, uint8_t *data)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t old_bar0_pramin = 0;
+	int i;
+
+	if (dev_priv->card_type >= NV_50) {
+		uint32_t vbios_vram = (nv_rd32(dev, 0x619f04) & ~0xff) << 8;
+
+		if (!vbios_vram)
+			vbios_vram = (nv_rd32(dev, 0x1700) << 16) + 0xf0000;
+
+		old_bar0_pramin = nv_rd32(dev, 0x1700);
+		nv_wr32(dev, 0x1700, vbios_vram >> 16);
+	}
+
+	/* bail if no rom signature */
+	if (nv_rd08(dev, NV_PRAMIN_OFFSET) != 0x55 ||
+	    nv_rd08(dev, NV_PRAMIN_OFFSET + 1) != 0xaa)
+		goto out;
+
+	for (i = 0; i < NV_PROM_SIZE; i++)
+		data[i] = nv_rd08(dev, NV_PRAMIN_OFFSET + i);
+
+out:
+	if (dev_priv->card_type >= NV_50)
+		nv_wr32(dev, 0x1700, old_bar0_pramin);
+}
+
+static void load_vbios_pci(struct drm_device *dev, uint8_t *data)
+{
+	void __iomem *rom = NULL;
+	size_t rom_len;
+	int ret;
+
+	ret = pci_enable_rom(dev->pdev);
+	if (ret)
+		return;
+
+	rom = pci_map_rom(dev->pdev, &rom_len);
+	if (!rom)
+		goto out;
+	memcpy_fromio(data, rom, rom_len);
+	pci_unmap_rom(dev->pdev, rom);
+
+out:
+	pci_disable_rom(dev->pdev);
+}
+
+struct methods {
+	const char desc[8];
+	void (*loadbios)(struct drm_device *, uint8_t *);
+	const bool rw;
+	int score;
+};
+
+static struct methods nv04_methods[] = {
+	{ "PROM", load_vbios_prom, false },
+	{ "PRAMIN", load_vbios_pramin, true },
+	{ "PCIROM", load_vbios_pci, true },
+	{ }
+};
+
+static struct methods nv50_methods[] = {
+	{ "PRAMIN", load_vbios_pramin, true },
+	{ "PROM", load_vbios_prom, false },
+	{ "PCIROM", load_vbios_pci, true },
+	{ }
+};
+
+static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct methods *methods, *method;
+	int testscore = 3;
+
+	if (nouveau_vbios) {
+		method = nv04_methods;
+		while (method->loadbios) {
+			if (!strcasecmp(nouveau_vbios, method->desc))
+				break;
+			method++;
+		}
+
+		if (method->loadbios) {
+			NV_INFO(dev, "Attempting to use BIOS image from %s\n",
+				method->desc);
+
+			method->loadbios(dev, data);
+			if (score_vbios(dev, data, method->rw))
+				return true;
+		}
+
+		NV_ERROR(dev, "VBIOS source '%s' invalid\n", nouveau_vbios);
+	}
+
+	if (dev_priv->card_type < NV_50)
+		methods = nv04_methods;
+	else
+		methods = nv50_methods;
+
+	method = methods;
+	while (method->loadbios) {
+		NV_TRACE(dev, "Attempting to load BIOS image from %s\n",
+			 method->desc);
+		data[0] = data[1] = 0;	/* avoid reuse of previous image */
+		method->loadbios(dev, data);
+		method->score = score_vbios(dev, data, method->rw);
+		if (method->score == testscore)
+			return true;
+		method++;
+	}
+
+	while (--testscore > 0) {
+		method = methods;
+		while (method->loadbios) {
+			if (method->score == testscore) {
+				NV_TRACE(dev, "Using BIOS image from %s\n",
+					 method->desc);
+				method->loadbios(dev, data);
+				return true;
+			}
+			method++;
+		}
+	}
+
+	NV_ERROR(dev, "No valid BIOS image found\n");
+	return false;
+}
+
+struct init_tbl_entry {
+	char *name;
+	uint8_t id;
+	int length;
+	int length_offset;
+	int length_multiplier;
+	bool (*handler)(struct nvbios *, uint16_t, struct init_exec *);
+};
+
+struct bit_entry {
+	uint8_t id[2];
+	uint16_t length;
+	uint16_t offset;
+};
+
+static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *);
+
+#define MACRO_INDEX_SIZE	2
+#define MACRO_SIZE		8
+#define CONDITION_SIZE		12
+#define IO_FLAG_CONDITION_SIZE	9
+#define IO_CONDITION_SIZE	5
+#define MEM_INIT_SIZE		66
+
+static void still_alive(void)
+{
+#if 0
+	sync();
+	msleep(2);
+#endif
+}
+
+static uint32_t
+munge_reg(struct nvbios *bios, uint32_t reg)
+{
+	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+	struct dcb_entry *dcbent = bios->display.output;
+
+	if (dev_priv->card_type < NV_50)
+		return reg;
+
+	if (reg & 0x40000000) {
+		BUG_ON(!dcbent);
+
+		reg += (ffs(dcbent->or) - 1) * 0x800;
+		if ((reg & 0x20000000) && !(dcbent->sorconf.link & 1))
+			reg += 0x00000080;
+	}
+
+	reg &= ~0x60000000;
+	return reg;
+}
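+
+/*
+ * Hypothetical example of the munging above: a script register with bit
+ * 30 set, say 0x40061110, on an output with dcbent->or == 4 (ffs == 3)
+ * becomes 0x61110 + 2 * 0x800 = 0x62110 once the flag bits are masked
+ * off.
+ */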
+
+static int
+valid_reg(struct nvbios *bios, uint32_t reg)
+{
+	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+	struct drm_device *dev = bios->dev;
+
+	/* C51 has misaligned regs on purpose. Marvellous */
+	if (reg & 0x2 ||
+	    (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51)) {
+		NV_ERROR(dev, "========== misaligned reg 0x%08X ==========\n",
+			 reg);
+		return 0;
+	}
+	/*
+	 * Warn on C51 regs that have not been verified accessible in
+	 * mmiotracing
+	 */
+	if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 &&
+	    reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
+		NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
+			reg);
+
+	/* Trust the init scripts on G80 */
+	if (dev_priv->card_type >= NV_50)
+		return 1;
+
+	#define WITHIN(x, y, z) ((x >= y) && (x < y + z))
+	if (WITHIN(reg, NV_PMC_OFFSET, NV_PMC_SIZE))
+		return 1;
+	if (WITHIN(reg, NV_PBUS_OFFSET, NV_PBUS_SIZE))
+		return 1;
+	if (WITHIN(reg, NV_PFIFO_OFFSET, NV_PFIFO_SIZE))
+		return 1;
+	if (dev_priv->VBIOS.pub.chip_version >= 0x30 &&
+	    (WITHIN(reg, 0x4000, 0x600) || reg == 0x00004600))
+		return 1;
+	if (dev_priv->VBIOS.pub.chip_version >= 0x40 &&
+						WITHIN(reg, 0xc000, 0x48))
+		return 1;
+	if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0000d204)
+		return 1;
+	if (dev_priv->VBIOS.pub.chip_version >= 0x40) {
+		if (reg == 0x00011014 || reg == 0x00020328)
+			return 1;
+		if (WITHIN(reg, 0x88000, NV_PBUS_SIZE)) /* new PBUS */
+			return 1;
+	}
+	if (WITHIN(reg, NV_PFB_OFFSET, NV_PFB_SIZE))
+		return 1;
+	if (WITHIN(reg, NV_PEXTDEV_OFFSET, NV_PEXTDEV_SIZE))
+		return 1;
+	if (WITHIN(reg, NV_PCRTC0_OFFSET, NV_PCRTC0_SIZE * 2))
+		return 1;
+	if (WITHIN(reg, NV_PRAMDAC0_OFFSET, NV_PRAMDAC0_SIZE * 2))
+		return 1;
+	if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0070fff0)
+		return 1;
+	if (dev_priv->VBIOS.pub.chip_version == 0x51 &&
+				WITHIN(reg, NV_PRAMIN_OFFSET, NV_PRAMIN_SIZE))
+		return 1;
+	#undef WITHIN
+
+	NV_ERROR(dev, "========== unknown reg 0x%08X ==========\n", reg);
+
+	return 0;
+}
+
+static bool
+valid_idx_port(struct nvbios *bios, uint16_t port)
+{
+	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+	struct drm_device *dev = bios->dev;
+
+	/*
+	 * If adding more ports here, the read/write functions below will need
+	 * updating so that the correct mmio range (PRMCIO, PRMDIO, PRMVIO) is
+	 * used for the port in question
+	 */
+	if (dev_priv->card_type < NV_50) {
+		if (port == NV_CIO_CRX__COLOR)
+			return true;
+		if (port == NV_VIO_SRX)
+			return true;
+	} else {
+		if (port == NV_CIO_CRX__COLOR)
+			return true;
+	}
+
+	NV_ERROR(dev, "========== unknown indexed io port 0x%04X ==========\n",
+		 port);
+
+	return false;
+}
+
+static bool
+valid_port(struct nvbios *bios, uint16_t port)
+{
+	struct drm_device *dev = bios->dev;
+
+	/*
+	 * If adding more ports here, the read/write functions below will need
+	 * updating so that the correct mmio range (PRMCIO, PRMDIO, PRMVIO) is
+	 * used for the port in question
+	 */
+	if (port == NV_VIO_VSE2)
+		return true;
+
+	NV_ERROR(dev, "========== unknown io port 0x%04X ==========\n", port);
+
+	return false;
+}
+
+static uint32_t
+bios_rd32(struct nvbios *bios, uint32_t reg)
+{
+	uint32_t data;
+
+	reg = munge_reg(bios, reg);
+	if (!valid_reg(bios, reg))
+		return 0;
+
+	/*
+	 * C51 sometimes uses regs with bit0 set in the address. For these
+	 * cases there should exist a translation in a BIOS table to an IO
+	 * port address which the BIOS uses for accessing the reg
+	 *
+	 * These only seem to appear for the power control regs to a flat panel,
+	 * and the GPIO regs at 0x60081*.  In C51 mmio traces the normal regs
+	 * for 0x1308 and 0x1310 are used - hence the mask below.  An S3
+	 * suspend-resume mmio trace from a C51 will be required to see if this
+	 * is true for the power microcode in 0x14.., or whether the direct IO
+	 * port access method is needed
+	 */
+	if (reg & 0x1)
+		reg &= ~0x1;
+
+	data = nv_rd32(bios->dev, reg);
+
+	BIOSLOG(bios, "	Read:  Reg: 0x%08X, Data: 0x%08X\n", reg, data);
+
+	return data;
+}
+
+static void
+bios_wr32(struct nvbios *bios, uint32_t reg, uint32_t data)
+{
+	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+
+	reg = munge_reg(bios, reg);
+	if (!valid_reg(bios, reg))
+		return;
+
+	/* see note in bios_rd32 */
+	if (reg & 0x1)
+		reg &= 0xfffffffe;
+
+	LOG_OLD_VALUE(bios_rd32(bios, reg));
+	BIOSLOG(bios, "	Write: Reg: 0x%08X, Data: 0x%08X\n", reg, data);
+
+	if (dev_priv->VBIOS.execute) {
+		still_alive();
+		nv_wr32(bios->dev, reg, data);
+	}
+}
+
+static uint8_t
+bios_idxprt_rd(struct nvbios *bios, uint16_t port, uint8_t index)
+{
+	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+	struct drm_device *dev = bios->dev;
+	uint8_t data;
+
+	if (!valid_idx_port(bios, port))
+		return 0;
+
+	if (dev_priv->card_type < NV_50) {
+		if (port == NV_VIO_SRX)
+			data = NVReadVgaSeq(dev, bios->state.crtchead, index);
+		else	/* assume NV_CIO_CRX__COLOR */
+			data = NVReadVgaCrtc(dev, bios->state.crtchead, index);
+	} else {
+		uint32_t data32;
+
+		data32 = bios_rd32(bios, NV50_PDISPLAY_VGACRTC(index & ~3));
+		data = (data32 >> ((index & 3) << 3)) & 0xff;
+	}
+
+	BIOSLOG(bios, "	Indexed IO read:  Port: 0x%04X, Index: 0x%02X, "
+		      "Head: 0x%02X, Data: 0x%02X\n",
+		port, index, bios->state.crtchead, data);
+	return data;
+}
+
+static void
+bios_idxprt_wr(struct nvbios *bios, uint16_t port, uint8_t index, uint8_t data)
+{
+	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+	struct drm_device *dev = bios->dev;
+
+	if (!valid_idx_port(bios, port))
+		return;
+
+	/*
+	 * The current head is maintained in the nvbios member state.crtchead.
+	 * We trap changes to CR44 and update the head variable and hence the
+	 * register set written.
+	 * As CR44 only exists on CRTC0, we update crtchead to head0 in advance
+	 * of the write, and to head1 after the write
+	 */
+	if (port == NV_CIO_CRX__COLOR && index == NV_CIO_CRE_44 &&
+	    data != NV_CIO_CRE_44_HEADB)
+		bios->state.crtchead = 0;
+
+	LOG_OLD_VALUE(bios_idxprt_rd(bios, port, index));
+	BIOSLOG(bios, "	Indexed IO write: Port: 0x%04X, Index: 0x%02X, "
+		      "Head: 0x%02X, Data: 0x%02X\n",
+		port, index, bios->state.crtchead, data);
+
+	if (bios->execute && dev_priv->card_type < NV_50) {
+		still_alive();
+		if (port == NV_VIO_SRX)
+			NVWriteVgaSeq(dev, bios->state.crtchead, index, data);
+		else	/* assume NV_CIO_CRX__COLOR */
+			NVWriteVgaCrtc(dev, bios->state.crtchead, index, data);
+	} else
+	if (bios->execute) {
+		uint32_t data32, shift = (index & 3) << 3;
+
+		still_alive();
+
+		data32  = bios_rd32(bios, NV50_PDISPLAY_VGACRTC(index & ~3));
+		data32 &= ~(0xff << shift);
+		data32 |= (data << shift);
+		bios_wr32(bios, NV50_PDISPLAY_VGACRTC(index & ~3), data32);
+	}
+
+	if (port == NV_CIO_CRX__COLOR &&
+	    index == NV_CIO_CRE_44 && data == NV_CIO_CRE_44_HEADB)
+		bios->state.crtchead = 1;
+}
+
+static uint8_t
+bios_port_rd(struct nvbios *bios, uint16_t port)
+{
+	uint8_t data, head = bios->state.crtchead;
+
+	if (!valid_port(bios, port))
+		return 0;
+
+	data = NVReadPRMVIO(bios->dev, head, NV_PRMVIO0_OFFSET + port);
+
+	BIOSLOG(bios, "	IO read:  Port: 0x%04X, Head: 0x%02X, Data: 0x%02X\n",
+		port, head, data);
+
+	return data;
+}
+
+static void
+bios_port_wr(struct nvbios *bios, uint16_t port, uint8_t data)
+{
+	int head = bios->state.crtchead;
+
+	if (!valid_port(bios, port))
+		return;
+
+	LOG_OLD_VALUE(bios_port_rd(bios, port));
+	BIOSLOG(bios, "	IO write: Port: 0x%04X, Head: 0x%02X, Data: 0x%02X\n",
+		port, head, data);
+
+	if (!bios->execute)
+		return;
+
+	still_alive();
+	NVWritePRMVIO(bios->dev, head, NV_PRMVIO0_OFFSET + port, data);
+}
+
+static bool
+io_flag_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
+{
+	/*
+	 * The IO flag condition entry has 2 bytes for the CRTC port; 1 byte
+	 * for the CRTC index; 1 byte for the mask to apply to the value
+	 * retrieved from the CRTC; 1 byte for the shift right to apply to the
+	 * masked CRTC value; 2 bytes for the offset to the flag array, to
+	 * which the shifted value is added; 1 byte for the mask applied to the
+	 * value read from the flag array; and 1 byte for the value to compare
+	 * against the masked byte from the flag table.
+	 */
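+
+	/*
+	 * Worked example with made-up table bytes 14 03 44 30 04 52 1a 0f
+	 * 02: read index 0x44 on port 0x0314, AND with 0x30 and shift right
+	 * by 4 to index the flag array at 0x1a52, then AND that byte with
+	 * 0x0f and compare it against 0x02.
+	 */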
+
+	uint16_t condptr = bios->io_flag_condition_tbl_ptr + cond * IO_FLAG_CONDITION_SIZE;
+	uint16_t crtcport = ROM16(bios->data[condptr]);
+	uint8_t crtcindex = bios->data[condptr + 2];
+	uint8_t mask = bios->data[condptr + 3];
+	uint8_t shift = bios->data[condptr + 4];
+	uint16_t flagarray = ROM16(bios->data[condptr + 5]);
+	uint8_t flagarraymask = bios->data[condptr + 7];
+	uint8_t cmpval = bios->data[condptr + 8];
+	uint8_t data;
+
+	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
+		      "Shift: 0x%02X, FlagArray: 0x%04X, FAMask: 0x%02X, "
+		      "Cmpval: 0x%02X\n",
+		offset, crtcport, crtcindex, mask, shift, flagarray, flagarraymask, cmpval);
+
+	data = bios_idxprt_rd(bios, crtcport, crtcindex);
+
+	data = bios->data[flagarray + ((data & mask) >> shift)];
+	data &= flagarraymask;
+
+	BIOSLOG(bios, "0x%04X: Checking if 0x%02X equals 0x%02X\n",
+		offset, data, cmpval);
+
+	return (data == cmpval);
+}
+
+static bool
+bios_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
+{
+	/*
+	 * The condition table entry has 4 bytes for the address of the
+	 * register to check, 4 bytes for a mask to apply to the register and
+	 * 4 for a test comparison value
+	 */
+
+	uint16_t condptr = bios->condition_tbl_ptr + cond * CONDITION_SIZE;
+	uint32_t reg = ROM32(bios->data[condptr]);
+	uint32_t mask = ROM32(bios->data[condptr + 4]);
+	uint32_t cmpval = ROM32(bios->data[condptr + 8]);
+	uint32_t data;
+
+	BIOSLOG(bios, "0x%04X: Cond: 0x%02X, Reg: 0x%08X, Mask: 0x%08X\n",
+		offset, cond, reg, mask);
+
+	data = bios_rd32(bios, reg) & mask;
+
+	BIOSLOG(bios, "0x%04X: Checking if 0x%08X equals 0x%08X\n",
+		offset, data, cmpval);
+
+	return (data == cmpval);
+}
+
+static bool
+io_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
+{
+	/*
+	 * The IO condition entry has 2 bytes for the IO port address; 1 byte
+	 * for the index to write to io_port; 1 byte for the mask to apply to
+	 * the byte read from io_port+1; and 1 byte for the value to compare
+	 * against the masked byte.
+	 */
+
+	uint16_t condptr = bios->io_condition_tbl_ptr + cond * IO_CONDITION_SIZE;
+	uint16_t io_port = ROM16(bios->data[condptr]);
+	uint8_t port_index = bios->data[condptr + 2];
+	uint8_t mask = bios->data[condptr + 3];
+	uint8_t cmpval = bios->data[condptr + 4];
+
+	uint8_t data = bios_idxprt_rd(bios, io_port, port_index) & mask;
+
+	BIOSLOG(bios, "0x%04X: Checking if 0x%02X equals 0x%02X\n",
+		offset, data, cmpval);
+
+	return (data == cmpval);
+}
+
+static int
+nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t reg0 = nv_rd32(dev, reg + 0);
+	uint32_t reg1 = nv_rd32(dev, reg + 4);
+	struct nouveau_pll_vals pll;
+	struct pll_lims pll_limits;
+	int ret;
+
+	ret = get_pll_limits(dev, reg, &pll_limits);
+	if (ret)
+		return ret;
+
+	clk = nouveau_calc_pll_mnp(dev, &pll_limits, clk, &pll);
+	if (!clk)
+		return -ERANGE;
+
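+	/*
+	 * Encode the coefficients: log2P into bits 18:16 of the first PLL
+	 * register, N1 into bits 15:8 and M1 into bits 7:0 of the second
+	 * (layout inferred from the masks below).
+	 */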
+	reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16);
+	reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1;
+
+	if (dev_priv->VBIOS.execute) {
+		still_alive();
+		nv_wr32(dev, reg + 4, reg1);
+		nv_wr32(dev, reg + 0, reg0);
+	}
+
+	return 0;
+}
+
+static int
+setPLL(struct nvbios *bios, uint32_t reg, uint32_t clk)
+{
+	struct drm_device *dev = bios->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	/* clk in kHz */
+	struct pll_lims pll_lim;
+	struct nouveau_pll_vals pllvals;
+	int ret;
+
+	if (dev_priv->card_type >= NV_50)
+		return nv50_pll_set(dev, reg, clk);
+
+	/* high regs (such as in the mac g5 table) are not adjusted by -4 */
+	ret = get_pll_limits(dev, reg > 0x405c ? reg : reg - 4, &pll_lim);
+	if (ret)
+		return ret;
+
+	clk = nouveau_calc_pll_mnp(dev, &pll_lim, clk, &pllvals);
+	if (!clk)
+		return -ERANGE;
+
+	if (bios->execute) {
+		still_alive();
+		nouveau_hw_setpll(dev, reg, &pllvals);
+	}
+
+	return 0;
+}
+
+static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->VBIOS;
+
+	/*
+	 * For the results of this function to be correct, CR44 must have been
+	 * set (using bios_idxprt_wr to set crtchead), CR58 set for CR57 = 0,
+	 * and the DCB table parsed, before the script calling the function is
+	 * run.  run_digital_op_script is an example of how to do such setup
+	 */
+
+	uint8_t dcb_entry = NVReadVgaCrtc5758(dev, bios->state.crtchead, 0);
+
+	if (dcb_entry >= bios->bdcb.dcb.entries) {
+		NV_ERROR(dev, "CR58 doesn't have a valid DCB entry currently "
+				"(%02X)\n", dcb_entry);
+		dcb_entry = 0x7f;	/* unused / invalid marker */
+	}
+
+	return dcb_entry;
+}
+
+static struct nouveau_i2c_chan *
+init_i2c_device_find(struct drm_device *dev, int i2c_index)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct bios_parsed_dcb *bdcb = &dev_priv->VBIOS.bdcb;
+
+	if (i2c_index == 0xff) {
+		/* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
+		int idx = dcb_entry_idx_from_crtchead(dev), shift = 0;
+		int default_indices = bdcb->i2c_default_indices;
+
+		if (idx != 0x7f && bdcb->dcb.entry[idx].i2c_upper_default)
+			shift = 4;
+
+		i2c_index = (default_indices >> shift) & 0xf;
+	}
+	if (i2c_index == 0x80)	/* g80+ */
+		i2c_index = bdcb->i2c_default_indices & 0xf;
+
+	return nouveau_i2c_find(dev, i2c_index);
+}
+
+static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
+{
+	/*
+	 * For mlv < 0x80, it is an index into a table of TMDS base addresses.
+	 * For mlv == 0x80 use the "or" value of the dcb_entry indexed by
+	 * CR58 for CR57 = 0 to index a table of offsets to the basic
+	 * 0x6808b0 address.
+	 * For mlv == 0x81 use the "or" value of the dcb_entry indexed by
+	 * CR58 for CR57 = 0 to index a table of offsets to the basic
+	 * 0x6808b0 address, and then XOR the offset with 8.
+	 */
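+
+	/*
+	 * Example with made-up values: mlv == 0x81 and an "or" value of 8
+	 * gives dacoffset 0x2008 from pramdac_offset[8], XORed with 8 to
+	 * 0x2000, so the returned register is 0x6808b0 + 0x2000 = 0x68a8b0.
+	 */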
+
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	const int pramdac_offset[13] = {
+		0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 };
+	const uint32_t pramdac_table[4] = {
+		0x6808b0, 0x6808b8, 0x6828b0, 0x6828b8 };
+
+	if (mlv >= 0x80) {
+		int dcb_entry, dacoffset;
+
+		/* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
+		dcb_entry = dcb_entry_idx_from_crtchead(dev);
+		if (dcb_entry == 0x7f)
+			return 0;
+		dacoffset = pramdac_offset[
+				dev_priv->VBIOS.bdcb.dcb.entry[dcb_entry].or];
+		if (mlv == 0x81)
+			dacoffset ^= 8;
+		return 0x6808b0 + dacoffset;
+	} else {
+		if (mlv >= ARRAY_SIZE(pramdac_table)) {
+			NV_ERROR(dev, "Magic Lookup Value too big (%02X)\n",
+									mlv);
+			return 0;
+		}
+		return pramdac_table[mlv];
+	}
+}
+
+static bool
+init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
+		      struct init_exec *iexec)
+{
+	/*
+	 * INIT_IO_RESTRICT_PROG   opcode: 0x32 ('2')
+	 *
+	 * offset      (8  bit): opcode
+	 * offset + 1  (16 bit): CRTC port
+	 * offset + 3  (8  bit): CRTC index
+	 * offset + 4  (8  bit): mask
+	 * offset + 5  (8  bit): shift
+	 * offset + 6  (8  bit): count
+	 * offset + 7  (32 bit): register
+	 * offset + 11 (32 bit): configuration 1
+	 * ...
+	 *
+	 * Starting at offset + 11 there are "count" 32 bit values.
+	 * To find out which value to use read index "CRTC index" on "CRTC
+	 * port", AND this value with "mask" and then bit shift right "shift"
+	 * bits.  Read the appropriate value using this index and write to
+	 * "register"
+	 */
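+
+	/*
+	 * Hypothetical example: with mask 0x30 and shift 4, a CRTC read of
+	 * 0x2c gives config (0x2c & 0x30) >> 4 = 2, so the third 32 bit
+	 * value, at offset + 11 + 2 * 4, is written to "register".
+	 */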
+
+	uint16_t crtcport = ROM16(bios->data[offset + 1]);
+	uint8_t crtcindex = bios->data[offset + 3];
+	uint8_t mask = bios->data[offset + 4];
+	uint8_t shift = bios->data[offset + 5];
+	uint8_t count = bios->data[offset + 6];
+	uint32_t reg = ROM32(bios->data[offset + 7]);
+	uint8_t config;
+	uint32_t configval;
+
+	if (!iexec->execute)
+		return true;
+
+	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
+		      "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n",
+		offset, crtcport, crtcindex, mask, shift, count, reg);
+
+	config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
+	if (config > count) {
+		NV_ERROR(bios->dev,
+			 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
+			 offset, config, count);
+		return false;
+	}
+
+	configval = ROM32(bios->data[offset + 11 + config * 4]);
+
+	BIOSLOG(bios, "0x%04X: Writing config %02X\n", offset, config);
+
+	bios_wr32(bios, reg, configval);
+
+	return true;
+}
+
+static bool
+init_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_REPEAT   opcode: 0x33 ('3')
+	 *
+	 * offset      (8 bit): opcode
+	 * offset + 1  (8 bit): count
+	 *
+	 * Execute script following this opcode up to INIT_REPEAT_END
+	 * "count" times
+	 */
+
+	uint8_t count = bios->data[offset + 1];
+	uint8_t i;
+
+	/* no iexec->execute check by design */
+
+	BIOSLOG(bios, "0x%04X: Repeating following segment %d times\n",
+		offset, count);
+
+	iexec->repeat = true;
+
+	/*
+	 * count - 1, as the script block will execute once when we leave this
+	 * opcode -- this is compatible with bios behaviour as:
+	 * a) the block is always executed at least once, even if count == 0
+	 * b) the bios interpreter skips to the op following INIT_END_REPEAT,
+	 * while we don't
+	 */
+	for (i = 0; i < count - 1; i++)
+		parse_init_table(bios, offset + 2, iexec);
+
+	iexec->repeat = false;
+
+	return true;
+}
+
+static bool
+init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
+		     struct init_exec *iexec)
+{
+	/*
+	 * INIT_IO_RESTRICT_PLL   opcode: 0x34 ('4')
+	 *
+	 * offset      (8  bit): opcode
+	 * offset + 1  (16 bit): CRTC port
+	 * offset + 3  (8  bit): CRTC index
+	 * offset + 4  (8  bit): mask
+	 * offset + 5  (8  bit): shift
+	 * offset + 6  (8  bit): IO flag condition index
+	 * offset + 7  (8  bit): count
+	 * offset + 8  (32 bit): register
+	 * offset + 12 (16 bit): frequency 1
+	 * ...
+	 *
+	 * Starting at offset + 12 there are "count" 16 bit frequencies (10kHz).
+	 * Set PLL register "register" to coefficients for frequency n,
+	 * selected by reading index "CRTC index" of "CRTC port" ANDed with
+	 * "mask" and shifted right by "shift".
+	 *
+	 * If "IO flag condition index" > 0, and condition met, double
+	 * frequency before setting it.
+	 */
+
+	uint16_t crtcport = ROM16(bios->data[offset + 1]);
+	uint8_t crtcindex = bios->data[offset + 3];
+	uint8_t mask = bios->data[offset + 4];
+	uint8_t shift = bios->data[offset + 5];
+	int8_t io_flag_condition_idx = bios->data[offset + 6];
+	uint8_t count = bios->data[offset + 7];
+	uint32_t reg = ROM32(bios->data[offset + 8]);
+	uint8_t config;
+	uint16_t freq;
+
+	if (!iexec->execute)
+		return true;
+
+	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
+		      "Shift: 0x%02X, IO Flag Condition: 0x%02X, "
+		      "Count: 0x%02X, Reg: 0x%08X\n",
+		offset, crtcport, crtcindex, mask, shift,
+		io_flag_condition_idx, count, reg);
+
+	config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
+	if (config > count) {
+		NV_ERROR(bios->dev,
+			 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
+			 offset, config, count);
+		return false;
+	}
+
+	freq = ROM16(bios->data[offset + 12 + config * 2]);
+
+	if (io_flag_condition_idx > 0) {
+		if (io_flag_condition_met(bios, offset, io_flag_condition_idx)) {
+			BIOSLOG(bios, "0x%04X: Condition fulfilled -- "
+				      "frequency doubled\n", offset);
+			freq *= 2;
+		} else
+			BIOSLOG(bios, "0x%04X: Condition not fulfilled -- "
+				      "frequency unchanged\n", offset);
+	}
+
+	BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Config: 0x%02X, Freq: %d0kHz\n",
+		offset, reg, config, freq);
+
+	setPLL(bios, reg, freq * 10);
+
+	return true;
+}
+
+static bool
+init_end_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_END_REPEAT   opcode: 0x36 ('6')
+	 *
+	 * offset      (8 bit): opcode
+	 *
+	 * Marks the end of the block for INIT_REPEAT to repeat
+	 */
+
+	/* no iexec->execute check by design */
+
+	/*
+	 * iexec->repeat flag necessary to go past INIT_END_REPEAT opcode when
+	 * we're not in repeat mode
+	 */
+	if (iexec->repeat)
+		return false;
+
+	return true;
+}
+
+static bool
+init_copy(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_COPY   opcode: 0x37 ('7')
+	 *
+	 * offset      (8  bit): opcode
+	 * offset + 1  (32 bit): register
+	 * offset + 5  (8  bit): shift
+	 * offset + 6  (8  bit): srcmask
+	 * offset + 7  (16 bit): CRTC port
+	 * offset + 9  (8  bit): CRTC index
+	 * offset + 10 (8  bit): mask
+	 *
+	 * Read index "CRTC index" on "CRTC port", AND with "mask", OR with
+	 * (REGVAL("register") >> "shift" & "srcmask") and write-back to CRTC
+	 * port
+	 */
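+
+	/*
+	 * Note the shift encoding used here and in INIT_COPY_NV_REG: a
+	 * shift below 0x80 shifts right, anything else shifts left by
+	 * (0x100 - shift), so e.g. a shift byte of 0xfd is a left shift
+	 * by 3.
+	 */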
+
+	uint32_t reg = ROM32(bios->data[offset + 1]);
+	uint8_t shift = bios->data[offset + 5];
+	uint8_t srcmask = bios->data[offset + 6];
+	uint16_t crtcport = ROM16(bios->data[offset + 7]);
+	uint8_t crtcindex = bios->data[offset + 9];
+	uint8_t mask = bios->data[offset + 10];
+	uint32_t data;
+	uint8_t crtcdata;
+
+	if (!iexec->execute)
+		return true;
+
+	BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%02X, "
+		      "Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X\n",
+		offset, reg, shift, srcmask, crtcport, crtcindex, mask);
+
+	data = bios_rd32(bios, reg);
+
+	if (shift < 0x80)
+		data >>= shift;
+	else
+		data <<= (0x100 - shift);
+
+	data &= srcmask;
+
+	crtcdata  = bios_idxprt_rd(bios, crtcport, crtcindex) & mask;
+	crtcdata |= (uint8_t)data;
+	bios_idxprt_wr(bios, crtcport, crtcindex, crtcdata);
+
+	return true;
+}
+
+static bool
+init_not(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_NOT   opcode: 0x38 ('8')
+	 *
+	 * offset      (8  bit): opcode
+	 *
+	 * Invert the current execute / no-execute condition (i.e. "else")
+	 */
+	if (iexec->execute)
+		BIOSLOG(bios, "0x%04X: ------ Skipping following commands  ------\n", offset);
+	else
+		BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", offset);
+
+	iexec->execute = !iexec->execute;
+	return true;
+}
+
+static bool
+init_io_flag_condition(struct nvbios *bios, uint16_t offset,
+		       struct init_exec *iexec)
+{
+	/*
+	 * INIT_IO_FLAG_CONDITION   opcode: 0x39 ('9')
+	 *
+	 * offset      (8 bit): opcode
+	 * offset + 1  (8 bit): condition number
+	 *
+	 * Check condition "condition number" in the IO flag condition table.
+	 * If condition not met skip subsequent opcodes until condition is
+	 * inverted (INIT_NOT), or we hit INIT_RESUME
+	 */
+
+	uint8_t cond = bios->data[offset + 1];
+
+	if (!iexec->execute)
+		return true;
+
+	if (io_flag_condition_met(bios, offset, cond))
+		BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
+	else {
+		BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
+		iexec->execute = false;
+	}
+
+	return true;
+}
+
+static bool
+init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
+		      struct init_exec *iexec)
+{
+	/*
+	 * INIT_INDEX_ADDRESS_LATCHED   opcode: 0x49 ('I')
+	 *
+	 * offset      (8  bit): opcode
+	 * offset + 1  (32 bit): control register
+	 * offset + 5  (32 bit): data register
+	 * offset + 9  (32 bit): mask
+	 * offset + 13 (32 bit): data
+	 * offset + 17 (8  bit): count
+	 * offset + 18 (8  bit): address 1
+	 * offset + 19 (8  bit): data 1
+	 * ...
+	 *
+	 * For each of "count" address and data pairs, write "data n" to
+	 * "data register", read the current value of "control register",
+	 * and write it back once ANDed with "mask", ORed with "data",
+	 * and ORed with "address n"
+	 */
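+
+	/*
+	 * Sketch of one iteration (made-up numbers): with mask 0xffffff00,
+	 * data 0x80 and the pair (address 0x02, data 0x40), 0x40 is written
+	 * to "data register" and (ctrl & 0xffffff00) | 0x80 | 0x02 is
+	 * written back to "control register".
+	 */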
+
+	uint32_t controlreg = ROM32(bios->data[offset + 1]);
+	uint32_t datareg = ROM32(bios->data[offset + 5]);
+	uint32_t mask = ROM32(bios->data[offset + 9]);
+	uint32_t data = ROM32(bios->data[offset + 13]);
+	uint8_t count = bios->data[offset + 17];
+	uint32_t value;
+	int i;
+
+	if (!iexec->execute)
+		return true;
+
+	BIOSLOG(bios, "0x%04X: ControlReg: 0x%08X, DataReg: 0x%08X, "
+		      "Mask: 0x%08X, Data: 0x%08X, Count: 0x%02X\n",
+		offset, controlreg, datareg, mask, data, count);
+
+	for (i = 0; i < count; i++) {
+		uint8_t instaddress = bios->data[offset + 18 + i * 2];
+		uint8_t instdata = bios->data[offset + 19 + i * 2];
+
+		BIOSLOG(bios, "0x%04X: Address: 0x%02X, Data: 0x%02X\n",
+			offset, instaddress, instdata);
+
+		bios_wr32(bios, datareg, instdata);
+		value  = bios_rd32(bios, controlreg) & mask;
+		value |= data;
+		value |= instaddress;
+		bios_wr32(bios, controlreg, value);
+	}
+
+	return true;
+}
+
+static bool
+init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
+		      struct init_exec *iexec)
+{
+	/*
+	 * INIT_IO_RESTRICT_PLL2   opcode: 0x4A ('J')
+	 *
+	 * offset      (8  bit): opcode
+	 * offset + 1  (16 bit): CRTC port
+	 * offset + 3  (8  bit): CRTC index
+	 * offset + 4  (8  bit): mask
+	 * offset + 5  (8  bit): shift
+	 * offset + 6  (8  bit): count
+	 * offset + 7  (32 bit): register
+	 * offset + 11 (32 bit): frequency 1
+	 * ...
+	 *
+	 * Starting at offset + 11 there are "count" 32 bit frequencies (kHz).
+	 * Set PLL register "register" to coefficients for frequency n,
+	 * selected by reading index "CRTC index" of "CRTC port" ANDed with
+	 * "mask" and shifted right by "shift".
+	 */
+
+	uint16_t crtcport = ROM16(bios->data[offset + 1]);
+	uint8_t crtcindex = bios->data[offset + 3];
+	uint8_t mask = bios->data[offset + 4];
+	uint8_t shift = bios->data[offset + 5];
+	uint8_t count = bios->data[offset + 6];
+	uint32_t reg = ROM32(bios->data[offset + 7]);
+	uint8_t config;
+	uint32_t freq;
+
+	if (!iexec->execute)
+		return true;
+
+	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
+		      "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n",
+		offset, crtcport, crtcindex, mask, shift, count, reg);
+
+	if (!reg)
+		return true;
+
+	config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
+	if (config > count) {
+		NV_ERROR(bios->dev,
+			 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
+			 offset, config, count);
+		return false;
+	}
+
+	freq = ROM32(bios->data[offset + 11 + config * 4]);
+
+	BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Config: 0x%02X, Freq: %dkHz\n",
+		offset, reg, config, freq);
+
+	setPLL(bios, reg, freq);
+
+	return true;
+}
+
+static bool
+init_pll2(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_PLL2   opcode: 0x4B ('K')
+	 *
+	 * offset      (8  bit): opcode
+	 * offset + 1  (32 bit): register
+	 * offset + 5  (32 bit): freq
+	 *
+	 * Set PLL register "register" to coefficients for frequency "freq"
+	 */
+
+	uint32_t reg = ROM32(bios->data[offset + 1]);
+	uint32_t freq = ROM32(bios->data[offset + 5]);
+
+	if (!iexec->execute)
+		return true;
+
+	BIOSLOG(bios, "0x%04X: Reg: 0x%04X, Freq: %dkHz\n",
+		offset, reg, freq);
+
+	setPLL(bios, reg, freq);
+	return true;
+}
+
+static bool
+init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_I2C_BYTE   opcode: 0x4C ('L')
+	 *
+	 * offset      (8 bit): opcode
+	 * offset + 1  (8 bit): DCB I2C table entry index
+	 * offset + 2  (8 bit): I2C slave address
+	 * offset + 3  (8 bit): count
+	 * offset + 4  (8 bit): I2C register 1
+	 * offset + 5  (8 bit): mask 1
+	 * offset + 6  (8 bit): data 1
+	 * ...
+	 *
+	 * For each of "count" registers given by "I2C register n" on the device
+	 * addressed by "I2C slave address" on the I2C bus given by
+	 * "DCB I2C table entry index", read the register, AND the result with
+	 * "mask n" and OR it with "data n" before writing it back to the device
+	 */
+
+	uint8_t i2c_index = bios->data[offset + 1];
+	uint8_t i2c_address = bios->data[offset + 2];
+	uint8_t count = bios->data[offset + 3];
+	struct nouveau_i2c_chan *chan;
+	struct i2c_msg msg;
+	int i;
+
+	if (!iexec->execute)
+		return true;
+
+	BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
+		      "Count: 0x%02X\n",
+		offset, i2c_index, i2c_address, count);
+
+	chan = init_i2c_device_find(bios->dev, i2c_index);
+	if (!chan)
+		return false;
+
+	for (i = 0; i < count; i++) {
+		uint8_t i2c_reg = bios->data[offset + 4 + i * 3];
+		uint8_t mask = bios->data[offset + 5 + i * 3];
+		uint8_t data = bios->data[offset + 6 + i * 3];
+		uint8_t value;
+
+		msg.addr = i2c_address;
+		msg.flags = I2C_M_RD;
+		msg.len = 1;
+		msg.buf = &value;
+		if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
+			return false;
+
+		BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
+			      "Mask: 0x%02X, Data: 0x%02X\n",
+			offset, i2c_reg, value, mask, data);
+
+		value = (value & mask) | data;
+
+		if (bios->execute) {
+			msg.addr = i2c_address;
+			msg.flags = 0;
+			msg.len = 1;
+			msg.buf = &value;
+			if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
+				return false;
+		}
+	}
+
+	return true;
+}
+
+static bool
+init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_ZM_I2C_BYTE   opcode: 0x4D ('M')
+	 *
+	 * offset      (8 bit): opcode
+	 * offset + 1  (8 bit): DCB I2C table entry index
+	 * offset + 2  (8 bit): I2C slave address
+	 * offset + 3  (8 bit): count
+	 * offset + 4  (8 bit): I2C register 1
+	 * offset + 5  (8 bit): data 1
+	 * ...
+	 *
+	 * For each of "count" registers given by "I2C register n" on the device
+	 * addressed by "I2C slave address" on the I2C bus given by
+	 * "DCB I2C table entry index", set the register to "data n"
+	 */
+
+	uint8_t i2c_index = bios->data[offset + 1];
+	uint8_t i2c_address = bios->data[offset + 2];
+	uint8_t count = bios->data[offset + 3];
+	struct nouveau_i2c_chan *chan;
+	struct i2c_msg msg;
+	int i;
+
+	if (!iexec->execute)
+		return true;
+
+	BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
+		      "Count: 0x%02X\n",
+		offset, i2c_index, i2c_address, count);
+
+	chan = init_i2c_device_find(bios->dev, i2c_index);
+	if (!chan)
+		return false;
+
+	for (i = 0; i < count; i++) {
+		uint8_t i2c_reg = bios->data[offset + 4 + i * 2];
+		uint8_t data = bios->data[offset + 5 + i * 2];
+
+		BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Data: 0x%02X\n",
+			offset, i2c_reg, data);
+
+		if (bios->execute) {
+			msg.addr = i2c_address;
+			msg.flags = 0;
+			msg.len = 1;
+			msg.buf = &data;
+			if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
+				return false;
+		}
+	}
+
+	return true;
+}
+
+static bool
+init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_ZM_I2C   opcode: 0x4E ('N')
+	 *
+	 * offset      (8 bit): opcode
+	 * offset + 1  (8 bit): DCB I2C table entry index
+	 * offset + 2  (8 bit): I2C slave address
+	 * offset + 3  (8 bit): count
+	 * offset + 4  (8 bit): data 1
+	 * ...
+	 *
+	 * Send "count" bytes ("data n") to the device addressed by "I2C slave
+	 * address" on the I2C bus given by "DCB I2C table entry index"
+	 */
+
+	uint8_t i2c_index = bios->data[offset + 1];
+	uint8_t i2c_address = bios->data[offset + 2];
+	uint8_t count = bios->data[offset + 3];
+	struct nouveau_i2c_chan *chan;
+	struct i2c_msg msg;
+	uint8_t data[256];
+	int i;
+
+	if (!iexec->execute)
+		return true;
+
+	BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
+		      "Count: 0x%02X\n",
+		offset, i2c_index, i2c_address, count);
+
+	chan = init_i2c_device_find(bios->dev, i2c_index);
+	if (!chan)
+		return false;
+
+	for (i = 0; i < count; i++) {
+		data[i] = bios->data[offset + 4 + i];
+
+		BIOSLOG(bios, "0x%04X: Data: 0x%02X\n", offset, data[i]);
+	}
+
+	if (bios->execute) {
+		msg.addr = i2c_address;
+		msg.flags = 0;
+		msg.len = count;
+		msg.buf = data;
+		if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
+			return false;
+	}
+
+	return true;
+}
+
+static bool
+init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_TMDS   opcode: 0x4F ('O')	(non-canon name)
+	 *
+	 * offset      (8 bit): opcode
+	 * offset + 1  (8 bit): magic lookup value
+	 * offset + 2  (8 bit): TMDS address
+	 * offset + 3  (8 bit): mask
+	 * offset + 4  (8 bit): data
+	 *
+	 * Read the data reg for TMDS address "TMDS address", AND it with mask
+	 * and OR it with data, then write it back
+	 * "magic lookup value" determines which TMDS base address register is
+	 * used -- see get_tmds_index_reg()
+	 */
+
+	uint8_t mlv = bios->data[offset + 1];
+	uint32_t tmdsaddr = bios->data[offset + 2];
+	uint8_t mask = bios->data[offset + 3];
+	uint8_t data = bios->data[offset + 4];
+	uint32_t reg, value;
+
+	if (!iexec->execute)
+		return true;
+
+	BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, TMDSAddr: 0x%02X, "
+		      "Mask: 0x%02X, Data: 0x%02X\n",
+		offset, mlv, tmdsaddr, mask, data);
+
+	reg = get_tmds_index_reg(bios->dev, mlv);
+	if (!reg)
+		return false;
+
+	bios_wr32(bios, reg,
+		  tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE);
+	value = (bios_rd32(bios, reg + 4) & mask) | data;
+	bios_wr32(bios, reg + 4, value);
+	bios_wr32(bios, reg, tmdsaddr);
+
+	return true;
+}
+
+static bool
+init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
+		   struct init_exec *iexec)
+{
+	/*
+	 * INIT_ZM_TMDS_GROUP   opcode: 0x50 ('P')	(non-canon name)
+	 *
+	 * offset      (8 bit): opcode
+	 * offset + 1  (8 bit): magic lookup value
+	 * offset + 2  (8 bit): count
+	 * offset + 3  (8 bit): addr 1
+	 * offset + 4  (8 bit): data 1
+	 * ...
+	 *
+	 * For each of "count" TMDS address and data pairs write "data n" to
+	 * "addr n".  "magic lookup value" determines which TMDS base address
+	 * register is used -- see get_tmds_index_reg()
+	 */
+
+	uint8_t mlv = bios->data[offset + 1];
+	uint8_t count = bios->data[offset + 2];
+	uint32_t reg;
+	int i;
+
+	if (!iexec->execute)
+		return true;
+
+	BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, Count: 0x%02X\n",
+		offset, mlv, count);
+
+	reg = get_tmds_index_reg(bios->dev, mlv);
+	if (!reg)
+		return false;
+
+	for (i = 0; i < count; i++) {
+		uint8_t tmdsaddr = bios->data[offset + 3 + i * 2];
+		uint8_t tmdsdata = bios->data[offset + 4 + i * 2];
+
+		bios_wr32(bios, reg + 4, tmdsdata);
+		bios_wr32(bios, reg, tmdsaddr);
+	}
+
+	return true;
+}
+
+static bool
+init_cr_idx_adr_latch(struct nvbios *bios, uint16_t offset,
+		      struct init_exec *iexec)
+{
+	/*
+	 * INIT_CR_INDEX_ADDRESS_LATCHED   opcode: 0x51 ('Q')
+	 *
+	 * offset      (8 bit): opcode
+	 * offset + 1  (8 bit): CRTC index1
+	 * offset + 2  (8 bit): CRTC index2
+	 * offset + 3  (8 bit): baseaddr
+	 * offset + 4  (8 bit): count
+	 * offset + 5  (8 bit): data 1
+	 * ...
+	 *
+	 * For each of "count" address and data pairs, write "baseaddr + n" to
+	 * "CRTC index1" and "data n" to "CRTC index2"
+	 * Once complete, restore initial value read from "CRTC index1"
+	 */
+	uint8_t crtcindex1 = bios->data[offset + 1];
+	uint8_t crtcindex2 = bios->data[offset + 2];
+	uint8_t baseaddr = bios->data[offset + 3];
+	uint8_t count = bios->data[offset + 4];
+	uint8_t oldaddr, data;
+	int i;
+
+	if (!iexec->execute)
+		return true;
+
+	BIOSLOG(bios, "0x%04X: Index1: 0x%02X, Index2: 0x%02X, "
+		      "BaseAddr: 0x%02X, Count: 0x%02X\n",
+		offset, crtcindex1, crtcindex2, baseaddr, count);
+
+	oldaddr = bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, crtcindex1);
+
+	for (i = 0; i < count; i++) {
+		bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1,
+				     baseaddr + i);
+		data = bios->data[offset + 5 + i];
+		bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex2, data);
+	}
+
+	bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1, oldaddr);
+
+	return true;
+}
+
+static bool
+init_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_CR   opcode: 0x52 ('R')
+	 *
+	 * offset      (8  bit): opcode
+	 * offset + 1  (8  bit): CRTC index
+	 * offset + 2  (8  bit): mask
+	 * offset + 3  (8  bit): data
+	 *
+	 * Assign the value at "CRTC index", ANDed with "mask" and ORed with
+	 * "data", back to "CRTC index"
+	 */
+
+	uint8_t crtcindex = bios->data[offset + 1];
+	uint8_t mask = bios->data[offset + 2];
+	uint8_t data = bios->data[offset + 3];
+	uint8_t value;
+
+	if (!iexec->execute)
+		return true;
+
+	BIOSLOG(bios, "0x%04X: Index: 0x%02X, Mask: 0x%02X, Data: 0x%02X\n",
+		offset, crtcindex, mask, data);
+
+	value  = bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, crtcindex) & mask;
+	value |= data;
+	bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, value);
+
+	return true;
+}
+
+static bool
+init_zm_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_ZM_CR   opcode: 0x53 ('S')
+	 *
+	 * offset      (8 bit): opcode
+	 * offset + 1  (8 bit): CRTC index
+	 * offset + 2  (8 bit): value
+	 *
+	 * Assign "value" to CRTC register with index "CRTC index".
+	 */
+
+	uint8_t crtcindex = bios->data[offset + 1];
+	uint8_t data = bios->data[offset + 2];
+
+	if (!iexec->execute)
+		return true;
+
+	bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, data);
+
+	return true;
+}
+
+static bool
+init_zm_cr_group(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_ZM_CR_GROUP   opcode: 0x54 ('T')
+	 *
+	 * offset      (8 bit): opcode
+	 * offset + 1  (8 bit): count
+	 * offset + 2  (8 bit): CRTC index 1
+	 * offset + 3  (8 bit): value 1
+	 * ...
+	 *
+	 * For each of "count" pairs, assign "value n" to the CRTC register
+	 * with index "CRTC index n".
+	 */
+
+	uint8_t count = bios->data[offset + 1];
+	int i;
+
+	if (!iexec->execute)
+		return true;
+
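+	/*
+	 * init_zm_cr() reads its CRTC index at offset + 1 and its value at
+	 * offset + 2, so passing offset + 2 + 2 * i - 1 lands those reads
+	 * on the i'th index/value pair of this opcode's table.
+	 */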
+	for (i = 0; i < count; i++)
+		init_zm_cr(bios, offset + 2 + 2 * i - 1, iexec);
+
+	return true;
+}
+
+static bool
+init_condition_time(struct nvbios *bios, uint16_t offset,
+		    struct init_exec *iexec)
+{
+	/*
+	 * INIT_CONDITION_TIME   opcode: 0x56 ('V')
+	 *
+	 * offset      (8 bit): opcode
+	 * offset + 1  (8 bit): condition number
+	 * offset + 2  (8 bit): retries / 50
+	 *
+	 * Check condition "condition number" in the condition table.
+	 * Bios code then sleeps for 2ms if the condition is not met, and
+	 * repeats up to "retries" times, but on one C51 this has proved
+	 * insufficient.  In mmiotraces the driver sleeps for 20ms, so we do
+	 * this, and bail after "retries" times, or 2s, whichever is less.
+	 * If still not met after retries, clear execution flag for this table.
+	 */
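+
+	/*
+	 * E.g. a retry byte of 0x05 gives 250 retries, clamped to 100
+	 * below, i.e. at most 100 * 20ms = 2s of polling.
+	 */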
+
+	uint8_t cond = bios->data[offset + 1];
+	uint16_t retries = bios->data[offset + 2] * 50;
+	unsigned cnt;
+
+	if (!iexec->execute)
+		return true;
+
+	if (retries > 100)
+		retries = 100;
+
+	BIOSLOG(bios, "0x%04X: Condition: 0x%02X, Retries: 0x%02X\n",
+		offset, cond, retries);
+
+	if (!bios->execute) /* avoid 2s delays when "faking" execution */
+		retries = 1;
+
+	for (cnt = 0; cnt < retries; cnt++) {
+		if (bios_condition_met(bios, offset, cond)) {
+			BIOSLOG(bios, "0x%04X: Condition met, continuing\n",
+								offset);
+			break;
+		} else {
+			BIOSLOG(bios, "0x%04X: "
+				"Condition not met, sleeping for 20ms\n",
+								offset);
+			msleep(20);
+		}
+	}
+
+	if (!bios_condition_met(bios, offset, cond)) {
+		NV_WARN(bios->dev,
+			"0x%04X: Condition still not met after %dms, "
+			"skipping following opcodes\n", offset, 20 * retries);
+		iexec->execute = false;
+	}
+
+	return true;
+}
+
+static bool
+init_zm_reg_sequence(struct nvbios *bios, uint16_t offset,
+		     struct init_exec *iexec)
+{
+	/*
+	 * INIT_ZM_REG_SEQUENCE   opcode: 0x58 ('X')
+	 *
+	 * offset      (8  bit): opcode
+	 * offset + 1  (32 bit): base register
+	 * offset + 5  (8  bit): count
+	 * offset + 6  (32 bit): value 1
+	 * ...
+	 *
+	 * Starting at offset + 6 there are "count" 32 bit values.
+	 * For "count" iterations set "base register" + 4 * current_iteration
+	 * to "value current_iteration"
+	 */
+
+	uint32_t basereg = ROM32(bios->data[offset + 1]);
+	uint32_t count = bios->data[offset + 5];
+	int i;
+
+	if (!iexec->execute)
+		return true;
+
+	BIOSLOG(bios, "0x%04X: BaseReg: 0x%08X, Count: 0x%02X\n",
+		offset, basereg, count);
+
+	for (i = 0; i < count; i++) {
+		uint32_t reg = basereg + i * 4;
+		uint32_t data = ROM32(bios->data[offset + 6 + i * 4]);
+
+		bios_wr32(bios, reg, data);
+	}
+
+	return true;
+}
+
+static bool
+init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_SUB_DIRECT   opcode: 0x5B ('[')
+	 *
+	 * offset      (8  bit): opcode
+	 * offset + 1  (16 bit): subroutine offset (in bios)
+	 *
+	 * Calls a subroutine that will execute commands until INIT_DONE
+	 * is found.
+	 */
+
+	uint16_t sub_offset = ROM16(bios->data[offset + 1]);
+
+	if (!iexec->execute)
+		return true;
+
+	BIOSLOG(bios, "0x%04X: Executing subroutine at 0x%04X\n",
+		offset, sub_offset);
+
+	parse_init_table(bios, sub_offset, iexec);
+
+	BIOSLOG(bios, "0x%04X: End of 0x%04X subroutine\n", offset, sub_offset);
+
+	return true;
+}
+
+static bool
+init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_COPY_NV_REG   opcode: 0x5F ('_')
+	 *
+	 * offset      (8  bit): opcode
+	 * offset + 1  (32 bit): src reg
+	 * offset + 5  (8  bit): shift
+	 * offset + 6  (32 bit): src mask
+	 * offset + 10 (32 bit): xor
+	 * offset + 14 (32 bit): dst reg
+	 * offset + 18 (32 bit): dst mask
+	 *
+	 * Shift REGVAL("src reg") right by (signed) "shift", AND result with
+	 * "src mask", then XOR with "xor". Write this OR'd with
+	 * (REGVAL("dst reg") AND'd with "dst mask") to "dst reg"
+	 */
+
+	uint32_t srcreg = ROM32(bios->data[offset + 1]);
+	uint8_t shift = bios->data[offset + 5];
+	uint32_t srcmask = ROM32(bios->data[offset + 6]);
+	uint32_t xor = ROM32(bios->data[offset + 10]);
+	uint32_t dstreg = ROM32(bios->data[offset + 14]);
+	uint32_t dstmask = ROM32(bios->data[offset + 18]);
+	uint32_t srcvalue, dstvalue;
+
+	if (!iexec->execute)
+		return true;
+
+	BIOSLOG(bios, "0x%04X: SrcReg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%08X, "
+		      "Xor: 0x%08X, DstReg: 0x%08X, DstMask: 0x%08X\n",
+		offset, srcreg, shift, srcmask, xor, dstreg, dstmask);
+
+	srcvalue = bios_rd32(bios, srcreg);
+
+	if (shift < 0x80)
+		srcvalue >>= shift;
+	else
+		srcvalue <<= (0x100 - shift);
+
+	srcvalue = (srcvalue & srcmask) ^ xor;
+
+	dstvalue = bios_rd32(bios, dstreg) & dstmask;
+
+	bios_wr32(bios, dstreg, dstvalue | srcvalue);
+
+	return true;
+}
+
+static bool
+init_zm_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_ZM_INDEX_IO   opcode: 0x62 ('b')
+	 *
+	 * offset      (8  bit): opcode
+	 * offset + 1  (16 bit): CRTC port
+	 * offset + 3  (8  bit): CRTC index
+	 * offset + 4  (8  bit): data
+	 *
+	 * Write "data" to index "CRTC index" of "CRTC port"
+	 */
+	uint16_t crtcport = ROM16(bios->data[offset + 1]);
+	uint8_t crtcindex = bios->data[offset + 3];
+	uint8_t data = bios->data[offset + 4];
+
+	if (!iexec->execute)
+		return true;
+
+	bios_idxprt_wr(bios, crtcport, crtcindex, data);
+
+	return true;
+}
+
+static bool
+init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_COMPUTE_MEM   opcode: 0x63 ('c')
+	 *
+	 * offset      (8 bit): opcode
+	 *
+	 * This opcode is meant to set NV_PFB_CFG0 (0x100200) appropriately so
+	 * that the hardware can correctly calculate how much VRAM it has
+	 * (and subsequently report that value in NV_PFB_CSTATUS (0x10020C))
+	 *
+	 * The implementation of this opcode in general consists of two parts:
+	 * 1) determination of the memory bus width
+	 * 2) determination of how many of the card's RAM pads have ICs attached
+	 *
+	 * 1) is done by a cunning combination of writes to offsets 0x1c and
+	 * 0x3c in the framebuffer, and seeing whether the written values are
+	 * read back correctly. This then affects bits 4-7 of NV_PFB_CFG0
+	 *
+	 * 2) is done by a cunning combination of writes to an offset slightly
+	 * less than the maximum memory reported by NV_PFB_CSTATUS, then seeing
+	 * if the test pattern can be read back. This then affects bits 12-15 of
+	 * NV_PFB_CFG0
+	 *
+	 * In this context a "cunning combination" may include multiple reads
+	 * and writes to varying locations, often alternating the test pattern
+	 * and 0, doubtless to make sure buffers are filled, residual charges
+	 * on tracks are removed etc.
+	 *
+	 * Unfortunately, the "cunning combination"s mentioned above, and the
+	 * changes to the bits in NV_PFB_CFG0 differ with nearly every bios
+	 * trace I have.
+	 *
+	 * Therefore, we cheat and assume the value of NV_PFB_CFG0 with which
+	 * we started was correct, and use that instead
+	 */
+
+	/* no iexec->execute check by design */
+
+	/*
+	 * This appears to be a NOP on G8x chipsets, both io logs of the VBIOS
+	 * and kmmio traces of the binary driver POSTing the card show nothing
+	 * being done for this opcode.  Why is it still listed in the table?!
+	 */
+
+	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+
+	if (dev_priv->card_type >= NV_50)
+		return true;
+
+	/*
+	 * On every card I've seen, this step gets done for us earlier in
+	 * the init scripts
+	uint8_t crdata = bios_idxprt_rd(dev, NV_VIO_SRX, 0x01);
+	bios_idxprt_wr(dev, NV_VIO_SRX, 0x01, crdata | 0x20);
+	 */
+
+	/*
+	 * This also has probably been done in the scripts, but an mmio trace of
+	 * s3 resume shows nvidia doing it anyway (unlike the NV_VIO_SRX write)
+	 */
+	bios_wr32(bios, NV_PFB_REFCTRL, NV_PFB_REFCTRL_VALID_1);
+
+	/* write back the saved configuration value */
+	bios_wr32(bios, NV_PFB_CFG0, bios->state.saved_nv_pfb_cfg0);
+
+	return true;
+}
+
+static bool
+init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_RESET   opcode: 0x65 ('e')
+	 *
+	 * offset      (8  bit): opcode
+	 * offset + 1  (32 bit): register
+	 * offset + 5  (32 bit): value1
+	 * offset + 9  (32 bit): value2
+	 *
+	 * Assign "value1" to "register", then assign "value2" to "register"
+	 */
+
+	uint32_t reg = ROM32(bios->data[offset + 1]);
+	uint32_t value1 = ROM32(bios->data[offset + 5]);
+	uint32_t value2 = ROM32(bios->data[offset + 9]);
+	uint32_t pci_nv_19, pci_nv_20;
+
+	/* no iexec->execute check by design */
+
+	pci_nv_19 = bios_rd32(bios, NV_PBUS_PCI_NV_19);
+	bios_wr32(bios, NV_PBUS_PCI_NV_19, 0);
+	bios_wr32(bios, reg, value1);
+
+	udelay(10);
+
+	bios_wr32(bios, reg, value2);
+	bios_wr32(bios, NV_PBUS_PCI_NV_19, pci_nv_19);
+
+	pci_nv_20 = bios_rd32(bios, NV_PBUS_PCI_NV_20);
+	pci_nv_20 &= ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED;	/* 0xfffffffe */
+	bios_wr32(bios, NV_PBUS_PCI_NV_20, pci_nv_20);
+
+	return true;
+}
+
+static bool
+init_configure_mem(struct nvbios *bios, uint16_t offset,
+		   struct init_exec *iexec)
+{
+	/*
+	 * INIT_CONFIGURE_MEM   opcode: 0x66 ('f')
+	 *
+	 * offset      (8 bit): opcode
+	 *
+	 * Equivalent to INIT_DONE on bios version 3 or greater.
+	 * For early bios versions, sets up the memory registers, using values
+	 * taken from the memory init table
+	 */
+
+	/* no iexec->execute check by design */
+
+	uint16_t meminitoffs = bios->legacy.mem_init_tbl_ptr +
+		MEM_INIT_SIZE * (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR,
+						NV_CIO_CRE_SCRATCH4__INDEX) >> 4);
+	uint16_t seqtbloffs = bios->legacy.sdr_seq_tbl_ptr;
+	uint16_t meminitdata = meminitoffs + 6;
+	uint32_t reg, data;
+
+	if (bios->major_version > 2)
+		return false;
+
+	bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd(
+		       bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20);
+
+	if (bios->data[meminitoffs] & 1)
+		seqtbloffs = bios->legacy.ddr_seq_tbl_ptr;
+
+	for (reg = ROM32(bios->data[seqtbloffs]);
+	     reg != 0xffffffff;
+	     reg = ROM32(bios->data[seqtbloffs += 4])) {
+
+		switch (reg) {
+		case NV_PFB_PRE:
+			data = NV_PFB_PRE_CMD_PRECHARGE;
+			break;
+		case NV_PFB_PAD:
+			data = NV_PFB_PAD_CKE_NORMAL;
+			break;
+		case NV_PFB_REF:
+			data = NV_PFB_REF_CMD_REFRESH;
+			break;
+		default:
+			data = ROM32(bios->data[meminitdata]);
+			meminitdata += 4;
+			if (data == 0xffffffff)
+				continue;
+		}
+
+		bios_wr32(bios, reg, data);
+	}
+
+	return true;
+}
+
+static bool
+init_configure_clk(struct nvbios *bios, uint16_t offset,
+		   struct init_exec *iexec)
+{
+	/*
+	 * INIT_CONFIGURE_CLK   opcode: 0x67 ('g')
+	 *
+	 * offset      (8 bit): opcode
+	 *
+	 * Equivalent to INIT_DONE on bios version 3 or greater.
+	 * For early bios versions, sets up the NVClk and MClk PLLs, using
+	 * values taken from the memory init table
+	 */
+
+	/* no iexec->execute check by design */
+
+	uint16_t meminitoffs = bios->legacy.mem_init_tbl_ptr +
+		MEM_INIT_SIZE * (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR,
+						NV_CIO_CRE_SCRATCH4__INDEX) >> 4);
+	int clock;
+
+	if (bios->major_version > 2)
+		return false;
+
+	clock = ROM16(bios->data[meminitoffs + 4]) * 10;
+	setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock);
+
+	clock = ROM16(bios->data[meminitoffs + 2]) * 10;
+	if (bios->data[meminitoffs] & 1) /* DDR */
+		clock *= 2;
+	setPLL(bios, NV_PRAMDAC_MPLL_COEFF, clock);
+
+	return true;
+}
+
+static bool
+init_configure_preinit(struct nvbios *bios, uint16_t offset,
+		       struct init_exec *iexec)
+{
+	/*
+	 * INIT_CONFIGURE_PREINIT   opcode: 0x68 ('h')
+	 *
+	 * offset      (8 bit): opcode
+	 *
+	 * Equivalent to INIT_DONE on bios version 3 or greater.
+	 * For early bios versions, does early init, loading ram and crystal
+	 * configuration from straps into CR3C
+	 */
+
+	/* no iexec->execute check by design */
+
+	uint32_t straps = bios_rd32(bios, NV_PEXTDEV_BOOT_0);
+	uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6));
+
+	if (bios->major_version > 2)
+		return false;
+
+	bios_idxprt_wr(bios, NV_CIO_CRX__COLOR,
+			     NV_CIO_CRE_SCRATCH4__INDEX, cr3c);
+
+	return true;
+}
+
+static bool
+init_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_IO   opcode: 0x69 ('i')
+	 *
+	 * offset      (8  bit): opcode
+	 * offset + 1  (16 bit): CRTC port
+	 * offset + 3  (8  bit): mask
+	 * offset + 4  (8  bit): data
+	 *
+	 * Assign ((IOVAL("crtc port") & "mask") | "data") to "crtc port"
+	 */
+
+	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+	uint16_t crtcport = ROM16(bios->data[offset + 1]);
+	uint8_t mask = bios->data[offset + 3];
+	uint8_t data = bios->data[offset + 4];
+
+	if (!iexec->execute)
+		return true;
+
+	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Mask: 0x%02X, Data: 0x%02X\n",
+		offset, crtcport, mask, data);
+
+	/*
+	 * I have no idea what this does, but NVIDIA do this magic sequence
+	 * in the places where this INIT_IO happens.
+	 */
+	if (dev_priv->card_type >= NV_50 && crtcport == 0x3c3 && data == 1) {
+		int i;
+
+		bios_wr32(bios, 0x614100, (bios_rd32(
+			  bios, 0x614100) & 0x0fffffff) | 0x00800000);
+
+		bios_wr32(bios, 0x00e18c, bios_rd32(
+			  bios, 0x00e18c) | 0x00020000);
+
+		bios_wr32(bios, 0x614900, (bios_rd32(
+			  bios, 0x614900) & 0x0fffffff) | 0x00800000);
+
+		bios_wr32(bios, 0x000200, bios_rd32(
+			  bios, 0x000200) & ~0x40000000);
+
+		mdelay(10);
+
+		bios_wr32(bios, 0x00e18c, bios_rd32(
+			  bios, 0x00e18c) & ~0x00020000);
+
+		bios_wr32(bios, 0x000200, bios_rd32(
+			  bios, 0x000200) | 0x40000000);
+
+		bios_wr32(bios, 0x614100, 0x00800018);
+		bios_wr32(bios, 0x614900, 0x00800018);
+
+		mdelay(10);
+
+		bios_wr32(bios, 0x614100, 0x10000018);
+		bios_wr32(bios, 0x614900, 0x10000018);
+
+		for (i = 0; i < 3; i++)
+			bios_wr32(bios, 0x614280 + (i*0x800), bios_rd32(
+				  bios, 0x614280 + (i*0x800)) & 0xf0f0f0f0);
+
+		for (i = 0; i < 2; i++)
+			bios_wr32(bios, 0x614300 + (i*0x800), bios_rd32(
+				  bios, 0x614300 + (i*0x800)) & 0xfffff0f0);
+
+		for (i = 0; i < 3; i++)
+			bios_wr32(bios, 0x614380 + (i*0x800), bios_rd32(
+				  bios, 0x614380 + (i*0x800)) & 0xfffff0f0);
+
+		for (i = 0; i < 2; i++)
+			bios_wr32(bios, 0x614200 + (i*0x800), bios_rd32(
+				  bios, 0x614200 + (i*0x800)) & 0xfffffff0);
+
+		for (i = 0; i < 2; i++)
+			bios_wr32(bios, 0x614108 + (i*0x800), bios_rd32(
+				  bios, 0x614108 + (i*0x800)) & 0x0fffffff);
+		return true;
+	}
+
+	bios_port_wr(bios, crtcport, (bios_port_rd(bios, crtcport) & mask) |
+									data);
+	return true;
+}
+
+static bool
+init_sub(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_SUB   opcode: 0x6B ('k')
+	 *
+	 * offset      (8 bit): opcode
+	 * offset + 1  (8 bit): script number
+	 *
+	 * Execute script number "script number", as a subroutine
+	 */
+
+	uint8_t sub = bios->data[offset + 1];
+
+	if (!iexec->execute)
+		return true;
+
+	BIOSLOG(bios, "0x%04X: Calling script %d\n", offset, sub);
+
+	parse_init_table(bios,
+			 ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]),
+			 iexec);
+
+	BIOSLOG(bios, "0x%04X: End of script %d\n", offset, sub);
+
+	return true;
+}
+
+static bool
+init_ram_condition(struct nvbios *bios, uint16_t offset,
+		   struct init_exec *iexec)
+{
+	/*
+	 * INIT_RAM_CONDITION   opcode: 0x6D ('m')
+	 *
+	 * offset      (8 bit): opcode
+	 * offset + 1  (8 bit): mask
+	 * offset + 2  (8 bit): cmpval
+	 *
+	 * Test if (NV_PFB_BOOT_0 & "mask") equals "cmpval".
+	 * If condition not met skip subsequent opcodes until condition is
+	 * inverted (INIT_NOT), or we hit INIT_RESUME
+	 */
+
+	uint8_t mask = bios->data[offset + 1];
+	uint8_t cmpval = bios->data[offset + 2];
+	uint8_t data;
+
+	if (!iexec->execute)
+		return true;
+
+	data = bios_rd32(bios, NV_PFB_BOOT_0) & mask;
+
+	BIOSLOG(bios, "0x%04X: Checking if 0x%08X equals 0x%08X\n",
+		offset, data, cmpval);
+
+	if (data == cmpval) {
+		BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
+	} else {
+		BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
+		iexec->execute = false;
+	}
+
+	return true;
+}
+
+static bool
+init_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_NV_REG   opcode: 0x6E ('n')
+	 *
+	 * offset      (8  bit): opcode
+	 * offset + 1  (32 bit): register
+	 * offset + 5  (32 bit): mask
+	 * offset + 9  (32 bit): data
+	 *
+	 * Assign ((REGVAL("register") & "mask") | "data") to "register"
+	 */
+
+	uint32_t reg = ROM32(bios->data[offset + 1]);
+	uint32_t mask = ROM32(bios->data[offset + 5]);
+	uint32_t data = ROM32(bios->data[offset + 9]);
+
+	if (!iexec->execute)
+		return true;
+
+	BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Mask: 0x%08X, Data: 0x%08X\n",
+		offset, reg, mask, data);
+
+	bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | data);
+
+	return true;
+}
+
+static bool
+init_macro(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_MACRO   opcode: 0x6F ('o')
+	 *
+	 * offset      (8 bit): opcode
+	 * offset + 1  (8 bit): macro number
+	 *
+	 * Look up macro index "macro number" in the macro index table.
+	 * The macro index table entry has 1 byte for the index in the macro
+	 * table, and 1 byte for the number of times to repeat the macro.
+	 * The macro table entry has 4 bytes for the register address and
+	 * 4 bytes for the value to write to that register
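+	 *
+	 * Worked example (hypothetical data): macro number 2 selects index
+	 * table entry 2; if that entry holds (index 5, count 3), macro
+	 * table entries 5, 6 and 7 are each executed as a single
+	 * (register, value) write.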
+	 */
+
+	uint8_t macro_index_tbl_idx = bios->data[offset + 1];
+	uint16_t tmp = bios->macro_index_tbl_ptr + (macro_index_tbl_idx * MACRO_INDEX_SIZE);
+	uint8_t macro_tbl_idx = bios->data[tmp];
+	uint8_t count = bios->data[tmp + 1];
+	uint32_t reg, data;
+	int i;
+
+	if (!iexec->execute)
+		return true;
+
+	BIOSLOG(bios, "0x%04X: Macro: 0x%02X, MacroTableIndex: 0x%02X, "
+		      "Count: 0x%02X\n",
+		offset, macro_index_tbl_idx, macro_tbl_idx, count);
+
+	for (i = 0; i < count; i++) {
+		uint16_t macroentryptr = bios->macro_tbl_ptr + (macro_tbl_idx + i) * MACRO_SIZE;
+
+		reg = ROM32(bios->data[macroentryptr]);
+		data = ROM32(bios->data[macroentryptr + 4]);
+
+		bios_wr32(bios, reg, data);
+	}
+
+	return true;
+}
+
+static bool
+init_done(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_DONE   opcode: 0x71 ('q')
+	 *
+	 * offset      (8  bit): opcode
+	 *
+	 * End the current script
+	 */
+
+	/* mild retval abuse to stop parsing this table */
+	return false;
+}
+
+static bool
+init_resume(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_RESUME   opcode: 0x72 ('r')
+	 *
+	 * offset      (8  bit): opcode
+	 *
+	 * End the current execute / no-execute condition
+	 */
+
+	if (iexec->execute)
+		return true;
+
+	iexec->execute = true;
+	BIOSLOG(bios, "0x%04X: ---- Executing following commands ----\n", offset);
+
+	return true;
+}
+
+static bool
+init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_TIME   opcode: 0x74 ('t')
+	 *
+	 * offset      (8  bit): opcode
+	 * offset + 1  (16 bit): time
+	 *
+	 * Sleep for "time" microseconds.
+	 */
+
+	unsigned time = ROM16(bios->data[offset + 1]);
+
+	if (!iexec->execute)
+		return true;
+
+	BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X microseconds\n",
+		offset, time);
+
+	if (time < 1000)
+		udelay(time);
+	else
+		msleep((time + 900) / 1000);
+
+	return true;
+}
+
+static bool
+init_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_CONDITION   opcode: 0x75 ('u')
+	 *
+	 * offset      (8 bit): opcode
+	 * offset + 1  (8 bit): condition number
+	 *
+	 * Check condition "condition number" in the condition table.
+	 * If condition not met skip subsequent opcodes until condition is
+	 * inverted (INIT_NOT), or we hit INIT_RESUME
+	 */
+
+	uint8_t cond = bios->data[offset + 1];
+
+	if (!iexec->execute)
+		return true;
+
+	BIOSLOG(bios, "0x%04X: Condition: 0x%02X\n", offset, cond);
+
+	if (bios_condition_met(bios, offset, cond)) {
+		BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
+	} else {
+		BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
+		iexec->execute = false;
+	}
+
+	return true;
+}
+
+static bool
+init_io_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_IO_CONDITION  opcode: 0x76 ('v')
+	 *
+	 * offset      (8 bit): opcode
+	 * offset + 1  (8 bit): condition number
+	 *
+	 * Check condition "condition number" in the io condition table.
+	 * If condition not met skip subsequent opcodes until condition is
+	 * inverted (INIT_NOT), or we hit INIT_RESUME
+	 */
+
+	uint8_t cond = bios->data[offset + 1];
+
+	if (!iexec->execute)
+		return true;
+
+	BIOSLOG(bios, "0x%04X: IO condition: 0x%02X\n", offset, cond);
+
+	if (io_condition_met(bios, offset, cond)) {
+		BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
+	} else {
+		BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
+		iexec->execute = false;
+	}
+
+	return true;
+}
+
+static bool
+init_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_INDEX_IO   opcode: 0x78 ('x')
+	 *
+	 * offset      (8  bit): opcode
+	 * offset + 1  (16 bit): CRTC port
+	 * offset + 3  (8  bit): CRTC index
+	 * offset + 4  (8  bit): mask
+	 * offset + 5  (8  bit): data
+	 *
+	 * Read value at index "CRTC index" on "CRTC port", AND with "mask",
+	 * OR with "data", write-back
+	 */
+
+	uint16_t crtcport = ROM16(bios->data[offset + 1]);
+	uint8_t crtcindex = bios->data[offset + 3];
+	uint8_t mask = bios->data[offset + 4];
+	uint8_t data = bios->data[offset + 5];
+	uint8_t value;
+
+	if (!iexec->execute)
+		return true;
+
+	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
+		      "Data: 0x%02X\n",
+		offset, crtcport, crtcindex, mask, data);
+
+	value = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) | data;
+	bios_idxprt_wr(bios, crtcport, crtcindex, value);
+
+	return true;
+}
+
+static bool
+init_pll(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_PLL   opcode: 0x79 ('y')
+	 *
+	 * offset      (8  bit): opcode
+	 * offset + 1  (32 bit): register
+	 * offset + 5  (16 bit): freq
+	 *
+	 * Set PLL register "register" to coefficients for frequency (10kHz)
+	 * "freq"
+	 */
+
+	uint32_t reg = ROM32(bios->data[offset + 1]);
+	uint16_t freq = ROM16(bios->data[offset + 5]);
+
+	if (!iexec->execute)
+		return true;
+
+	BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Freq: %d0kHz\n", offset, reg, freq);
+
+	setPLL(bios, reg, freq * 10);
+
+	return true;
+}
+
+static bool
+init_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_ZM_REG   opcode: 0x7A ('z')
+	 *
+	 * offset      (8  bit): opcode
+	 * offset + 1  (32 bit): register
+	 * offset + 5  (32 bit): value
+	 *
+	 * Assign "value" to "register"
+	 */
+
+	uint32_t reg = ROM32(bios->data[offset + 1]);
+	uint32_t value = ROM32(bios->data[offset + 5]);
+
+	if (!iexec->execute)
+		return true;
+
+	if (reg == 0x000200)
+		value |= 1;
+
+	bios_wr32(bios, reg, value);
+
+	return true;
+}
+
+static bool
+init_ram_restrict_pll(struct nvbios *bios, uint16_t offset,
+		      struct init_exec *iexec)
+{
+	/*
+	 * INIT_RAM_RESTRICT_PLL   opcode: 0x87 ('')
+	 *
+	 * offset      (8 bit): opcode
+	 * offset + 1  (8 bit): PLL type
+	 * offset + 2 (32 bit): frequency 0
+	 *
+	 * Uses the RAMCFG strap of PEXTDEV_BOOT as an index into the table at
+	 * ram_restrict_table_ptr.  The value read from there is used to select
+	 * a frequency from the table starting at 'frequency 0' to be
+	 * programmed into the PLL corresponding to 'type'.
+	 *
+	 * The PLL limits table on cards using this opcode has a mapping of
+	 * 'type' to the relevant registers.
+	 */
+
+	struct drm_device *dev = bios->dev;
+	uint32_t strap = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) & 0x0000003c) >> 2;
+	uint8_t index = bios->data[bios->ram_restrict_tbl_ptr + strap];
+	uint8_t type = bios->data[offset + 1];
+	uint32_t freq = ROM32(bios->data[offset + 2 + (index * 4)]);
+	uint8_t *pll_limits = &bios->data[bios->pll_limit_tbl_ptr], *entry;
+	int i;
+
+	if (!iexec->execute)
+		return true;
+
+	if (!bios->pll_limit_tbl_ptr || (pll_limits[0] & 0xf0) != 0x30) {
+		NV_ERROR(dev, "PLL limits table not version 3.x\n");
+		return true; /* deliberate, allow default clocks to remain */
+	}
+
+	entry = pll_limits + pll_limits[1];
+	for (i = 0; i < pll_limits[3]; i++, entry += pll_limits[2]) {
+		if (entry[0] == type) {
+			uint32_t reg = ROM32(entry[3]);
+
+			BIOSLOG(bios, "0x%04X: "
+				      "Type %02x Reg 0x%08x Freq %dKHz\n",
+				offset, type, reg, freq);
+
+			setPLL(bios, reg, freq);
+			return true;
+		}
+	}
+
+	NV_ERROR(dev, "PLL type 0x%02x not found in PLL limits table", type);
+	return true;
+}
+
+static bool
+init_8c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_8C   opcode: 0x8C ('')
+	 *
+	 * NOP so far....
+	 *
+	 */
+
+	return true;
+}
+
+static bool
+init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_8D   opcode: 0x8D ('')
+	 *
+	 * NOP so far....
+	 *
+	 */
+
+	return true;
+}
+
+static bool
+init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_GPIO   opcode: 0x8E ('')
+	 *
+	 * offset      (8 bit): opcode
+	 *
+	 * Loop over all entries in the DCB GPIO table, and initialise
+	 * each GPIO according to various values listed in each entry
+	 */
+
+	const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
+	const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
+	const uint8_t *gpio_table = &bios->data[bios->bdcb.gpio_table_ptr];
+	const uint8_t *gpio_entry;
+	int i;
+
+	if (bios->bdcb.version != 0x40) {
+		NV_ERROR(bios->dev, "DCB table not version 4.0\n");
+		return false;
+	}
+
+	if (!bios->bdcb.gpio_table_ptr) {
+		NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n");
+		return false;
+	}
+
+	gpio_entry = gpio_table + gpio_table[1];
+	for (i = 0; i < gpio_table[2]; i++, gpio_entry += gpio_table[3]) {
+		uint32_t entry = ROM32(gpio_entry[0]), r, s, v;
+		int line = (entry & 0x0000001f);
+
+		BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, entry);
+
+		if ((entry & 0x0000ff00) == 0x0000ff00)
+			continue;
+
+		r = nv50_gpio_reg[line >> 3];
+		s = (line & 0x07) << 2;
+		v = bios_rd32(bios, r) & ~(0x00000003 << s);
+		if (entry & 0x01000000)
+			v |= (((entry & 0x60000000) >> 29) ^ 2) << s;
+		else
+			v |= (((entry & 0x18000000) >> 27) ^ 2) << s;
+		bios_wr32(bios, r, v);
+
+		r = nv50_gpio_ctl[line >> 4];
+		s = (line & 0x0f);
+		v = bios_rd32(bios, r) & ~(0x00010001 << s);
+		switch ((entry & 0x06000000) >> 25) {
+		case 1:
+			v |= (0x00000001 << s);
+			break;
+		case 2:
+			v |= (0x00010000 << s);
+			break;
+		default:
+			break;
+		}
+		bios_wr32(bios, r, v);
+	}
+
+	return true;
+}
+
+/* hack to avoid moving the itbl_entry array before this function */
+static int init_ram_restrict_zm_reg_group_blocklen;
+
+static bool
+init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
+			       struct init_exec *iexec)
+{
+	/*
+	 * INIT_RAM_RESTRICT_ZM_REG_GROUP   opcode: 0x8F ('')
+	 *
+	 * offset      (8  bit): opcode
+	 * offset + 1  (32 bit): reg
+	 * offset + 5  (8  bit): regincrement
+	 * offset + 6  (8  bit): count
+	 * offset + 7  (32 bit): value 1,1
+	 * ...
+	 *
+	 * Use the RAMCFG strap of PEXTDEV_BOOT as an index into the table at
+	 * ram_restrict_table_ptr. The value read from here is 'n', and
+	 * "value 1,n" gets written to "reg". This repeats "count" times and on
+	 * each iteration 'm', "reg" increases by "regincrement" and
+	 * "value m,n" is used. The extent of n is limited by a number read
+	 * from the 'M' BIT table, herein called "blocklen"
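+	 *
+	 * E.g. (hypothetical values): with regincrement 4 and blocklen 16,
+	 * iteration m writes the dword at "offset + 7 + n * 4 + m * 16" to
+	 * register "reg + m * 4".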
+	 */
+
+	uint32_t reg = ROM32(bios->data[offset + 1]);
+	uint8_t regincrement = bios->data[offset + 5];
+	uint8_t count = bios->data[offset + 6];
+	uint32_t strap_ramcfg, data;
+	uint16_t blocklen;
+	uint8_t index;
+	int i;
+
+	/* previously set by 'M' BIT table */
+	blocklen = init_ram_restrict_zm_reg_group_blocklen;
+
+	if (!iexec->execute)
+		return true;
+
+	if (!blocklen) {
+		NV_ERROR(bios->dev,
+			 "0x%04X: Zero block length - has the M table "
+			 "been parsed?\n", offset);
+		return false;
+	}
+
+	strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf;
+	index = bios->data[bios->ram_restrict_tbl_ptr + strap_ramcfg];
+
+	BIOSLOG(bios, "0x%04X: Reg: 0x%08X, RegIncrement: 0x%02X, "
+		      "Count: 0x%02X, StrapRamCfg: 0x%02X, Index: 0x%02X\n",
+		offset, reg, regincrement, count, strap_ramcfg, index);
+
+	for (i = 0; i < count; i++) {
+		data = ROM32(bios->data[offset + 7 + index * 4 + blocklen * i]);
+
+		bios_wr32(bios, reg, data);
+
+		reg += regincrement;
+	}
+
+	return true;
+}
+
+static bool
+init_copy_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_COPY_ZM_REG   opcode: 0x90 ('')
+	 *
+	 * offset      (8  bit): opcode
+	 * offset + 1  (32 bit): src reg
+	 * offset + 5  (32 bit): dst reg
+	 *
+	 * Put contents of "src reg" into "dst reg"
+	 */
+
+	uint32_t srcreg = ROM32(bios->data[offset + 1]);
+	uint32_t dstreg = ROM32(bios->data[offset + 5]);
+
+	if (!iexec->execute)
+		return true;
+
+	bios_wr32(bios, dstreg, bios_rd32(bios, srcreg));
+
+	return true;
+}
+
+static bool
+init_zm_reg_group_addr_latched(struct nvbios *bios, uint16_t offset,
+			       struct init_exec *iexec)
+{
+	/*
+	 * INIT_ZM_REG_GROUP_ADDRESS_LATCHED   opcode: 0x91 ('')
+	 *
+	 * offset      (8  bit): opcode
+	 * offset + 1  (32 bit): dst reg
+	 * offset + 5  (8  bit): count
+	 * offset + 6  (32 bit): data 1
+	 * ...
+	 *
+	 * For each of "count" values write "data n" to "dst reg"
+	 */
+
+	uint32_t reg = ROM32(bios->data[offset + 1]);
+	uint8_t count = bios->data[offset + 5];
+	int i;
+
+	if (!iexec->execute)
+		return true;
+
+	for (i = 0; i < count; i++) {
+		uint32_t data = ROM32(bios->data[offset + 6 + 4 * i]);
+		bios_wr32(bios, reg, data);
+	}
+
+	return true;
+}
+
+static bool
+init_reserved(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_RESERVED   opcode: 0x92 ('')
+	 *
+	 * offset      (8 bit): opcode
+	 *
+	 * Seemingly does nothing
+	 */
+
+	return true;
+}
+
+static bool
+init_96(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_96   opcode: 0x96 ('')
+	 *
+	 * offset      (8  bit): opcode
+	 * offset + 1  (32 bit): sreg
+	 * offset + 5  (8  bit): sshift
+	 * offset + 6  (8  bit): smask
+	 * offset + 7  (8  bit): index
+	 * offset + 8  (32 bit): reg
+	 * offset + 12 (32 bit): mask
+	 * offset + 16 (8  bit): shift
+	 *
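+	 * Description inferred from the code below: read "sreg", shift by
+	 * the signed byte "sshift", AND with "smask" to form an index,
+	 * translate that index via the xlat table selected by "index",
+	 * shift the looked-up byte left by "shift", and merge the result
+	 * into "reg" under "mask".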
+	 */
+
+	uint16_t xlatptr = bios->init96_tbl_ptr + (bios->data[offset + 7] * 2);
+	uint32_t reg = ROM32(bios->data[offset + 8]);
+	uint32_t mask = ROM32(bios->data[offset + 12]);
+	uint32_t val;
+
+	val = bios_rd32(bios, ROM32(bios->data[offset + 1]));
+	if (bios->data[offset + 5] < 0x80)
+		val >>= bios->data[offset + 5];
+	else
+		val <<= (0x100 - bios->data[offset + 5]);
+	val &= bios->data[offset + 6];
+
+	val   = bios->data[ROM16(bios->data[xlatptr]) + val];
+	val <<= bios->data[offset + 16];
+
+	if (!iexec->execute)
+		return true;
+
+	bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | val);
+	return true;
+}
+
+static bool
+init_97(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_97   opcode: 0x97 ('')
+	 *
+	 * offset      (8  bit): opcode
+	 * offset + 1  (32 bit): register
+	 * offset + 5  (32 bit): mask
+	 * offset + 9  (32 bit): value
+	 *
+	 * Adds "value" to "register" preserving the fields specified
+	 * by "mask"
+	 */
+
+	uint32_t reg = ROM32(bios->data[offset + 1]);
+	uint32_t mask = ROM32(bios->data[offset + 5]);
+	uint32_t add = ROM32(bios->data[offset + 9]);
+	uint32_t val;
+
+	val = bios_rd32(bios, reg);
+	val = (val & mask) | ((val + add) & ~mask);
+
+	if (!iexec->execute)
+		return true;
+
+	bios_wr32(bios, reg, val);
+	return true;
+}
+
+static bool
+init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_AUXCH   opcode: 0x98 ('')
+	 *
+	 * offset      (8  bit): opcode
+	 * offset + 1  (32 bit): address
+	 * offset + 5  (8  bit): count
+	 * offset + 6  (8  bit): mask 0
+	 * offset + 7  (8  bit): data 0
+	 *  ...
+	 *
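+	 * Inferred from the code below: for each of "count" (mask, data)
+	 * byte pairs, read a byte from "address" over the DP aux channel,
+	 * AND it with "mask", OR in "data", and write it back.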
+	 */
+
+	struct drm_device *dev = bios->dev;
+	struct nouveau_i2c_chan *auxch;
+	uint32_t addr = ROM32(bios->data[offset + 1]);
+	uint8_t len = bios->data[offset + 5];
+	int ret, i;
+
+	if (!bios->display.output) {
+		NV_ERROR(dev, "INIT_AUXCH: no active output\n");
+		return false;
+	}
+
+	auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
+	if (!auxch) {
+		NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n",
+			 bios->display.output->i2c_index);
+		return false;
+	}
+
+	if (!iexec->execute)
+		return true;
+
+	offset += 6;
+	for (i = 0; i < len; i++, offset += 2) {
+		uint8_t data;
+
+		ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1);
+		if (ret) {
+			NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret);
+			return false;
+		}
+
+		data &= bios->data[offset + 0];
+		data |= bios->data[offset + 1];
+
+		ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1);
+		if (ret) {
+			NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret);
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static bool
+init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_ZM_AUXCH   opcode: 0x99 ('')
+	 *
+	 * offset      (8  bit): opcode
+	 * offset + 1  (32 bit): address
+	 * offset + 5  (8  bit): count
+	 * offset + 6  (8  bit): data 0
+	 *  ...
+	 *
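+	 * Inferred from the code below: write each of "count" data bytes
+	 * in turn to "address" over the DP aux channel.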
+	 */
+
+	struct drm_device *dev = bios->dev;
+	struct nouveau_i2c_chan *auxch;
+	uint32_t addr = ROM32(bios->data[offset + 1]);
+	uint8_t len = bios->data[offset + 5];
+	int ret, i;
+
+	if (!bios->display.output) {
+		NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n");
+		return false;
+	}
+
+	auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
+	if (!auxch) {
+		NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n",
+			 bios->display.output->i2c_index);
+		return false;
+	}
+
+	if (!iexec->execute)
+		return true;
+
+	offset += 6;
+	for (i = 0; i < len; i++, offset++) {
+		ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1);
+		if (ret) {
+			NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret);
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static struct init_tbl_entry itbl_entry[] = {
+	/* command name                       , id  , length  , offset  , mult    , command handler                 */
+	/* INIT_PROG (0x31, 15, 10, 4) removed due to no example of use */
+	{ "INIT_IO_RESTRICT_PROG"             , 0x32, 11      , 6       , 4       , init_io_restrict_prog           },
+	{ "INIT_REPEAT"                       , 0x33, 2       , 0       , 0       , init_repeat                     },
+	{ "INIT_IO_RESTRICT_PLL"              , 0x34, 12      , 7       , 2       , init_io_restrict_pll            },
+	{ "INIT_END_REPEAT"                   , 0x36, 1       , 0       , 0       , init_end_repeat                 },
+	{ "INIT_COPY"                         , 0x37, 11      , 0       , 0       , init_copy                       },
+	{ "INIT_NOT"                          , 0x38, 1       , 0       , 0       , init_not                        },
+	{ "INIT_IO_FLAG_CONDITION"            , 0x39, 2       , 0       , 0       , init_io_flag_condition          },
+	{ "INIT_INDEX_ADDRESS_LATCHED"        , 0x49, 18      , 17      , 2       , init_idx_addr_latched           },
+	{ "INIT_IO_RESTRICT_PLL2"             , 0x4A, 11      , 6       , 4       , init_io_restrict_pll2           },
+	{ "INIT_PLL2"                         , 0x4B, 9       , 0       , 0       , init_pll2                       },
+	{ "INIT_I2C_BYTE"                     , 0x4C, 4       , 3       , 3       , init_i2c_byte                   },
+	{ "INIT_ZM_I2C_BYTE"                  , 0x4D, 4       , 3       , 2       , init_zm_i2c_byte                },
+	{ "INIT_ZM_I2C"                       , 0x4E, 4       , 3       , 1       , init_zm_i2c                     },
+	{ "INIT_TMDS"                         , 0x4F, 5       , 0       , 0       , init_tmds                       },
+	{ "INIT_ZM_TMDS_GROUP"                , 0x50, 3       , 2       , 2       , init_zm_tmds_group              },
+	{ "INIT_CR_INDEX_ADDRESS_LATCHED"     , 0x51, 5       , 4       , 1       , init_cr_idx_adr_latch           },
+	{ "INIT_CR"                           , 0x52, 4       , 0       , 0       , init_cr                         },
+	{ "INIT_ZM_CR"                        , 0x53, 3       , 0       , 0       , init_zm_cr                      },
+	{ "INIT_ZM_CR_GROUP"                  , 0x54, 2       , 1       , 2       , init_zm_cr_group                },
+	{ "INIT_CONDITION_TIME"               , 0x56, 3       , 0       , 0       , init_condition_time             },
+	{ "INIT_ZM_REG_SEQUENCE"              , 0x58, 6       , 5       , 4       , init_zm_reg_sequence            },
+	/* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */
+	{ "INIT_SUB_DIRECT"                   , 0x5B, 3       , 0       , 0       , init_sub_direct                 },
+	{ "INIT_COPY_NV_REG"                  , 0x5F, 22      , 0       , 0       , init_copy_nv_reg                },
+	{ "INIT_ZM_INDEX_IO"                  , 0x62, 5       , 0       , 0       , init_zm_index_io                },
+	{ "INIT_COMPUTE_MEM"                  , 0x63, 1       , 0       , 0       , init_compute_mem                },
+	{ "INIT_RESET"                        , 0x65, 13      , 0       , 0       , init_reset                      },
+	{ "INIT_CONFIGURE_MEM"                , 0x66, 1       , 0       , 0       , init_configure_mem              },
+	{ "INIT_CONFIGURE_CLK"                , 0x67, 1       , 0       , 0       , init_configure_clk              },
+	{ "INIT_CONFIGURE_PREINIT"            , 0x68, 1       , 0       , 0       , init_configure_preinit          },
+	{ "INIT_IO"                           , 0x69, 5       , 0       , 0       , init_io                         },
+	{ "INIT_SUB"                          , 0x6B, 2       , 0       , 0       , init_sub                        },
+	{ "INIT_RAM_CONDITION"                , 0x6D, 3       , 0       , 0       , init_ram_condition              },
+	{ "INIT_NV_REG"                       , 0x6E, 13      , 0       , 0       , init_nv_reg                     },
+	{ "INIT_MACRO"                        , 0x6F, 2       , 0       , 0       , init_macro                      },
+	{ "INIT_DONE"                         , 0x71, 1       , 0       , 0       , init_done                       },
+	{ "INIT_RESUME"                       , 0x72, 1       , 0       , 0       , init_resume                     },
+	/* INIT_RAM_CONDITION2 (0x73, 9, 0, 0) removed due to no example of use */
+	{ "INIT_TIME"                         , 0x74, 3       , 0       , 0       , init_time                       },
+	{ "INIT_CONDITION"                    , 0x75, 2       , 0       , 0       , init_condition                  },
+	{ "INIT_IO_CONDITION"                 , 0x76, 2       , 0       , 0       , init_io_condition               },
+	{ "INIT_INDEX_IO"                     , 0x78, 6       , 0       , 0       , init_index_io                   },
+	{ "INIT_PLL"                          , 0x79, 7       , 0       , 0       , init_pll                        },
+	{ "INIT_ZM_REG"                       , 0x7A, 9       , 0       , 0       , init_zm_reg                     },
+	/* INIT_RAM_RESTRICT_PLL's length is adjusted by the BIT M table */
+	{ "INIT_RAM_RESTRICT_PLL"             , 0x87, 2       , 0       , 0       , init_ram_restrict_pll           },
+	{ "INIT_8C"                           , 0x8C, 1       , 0       , 0       , init_8c                         },
+	{ "INIT_8D"                           , 0x8D, 1       , 0       , 0       , init_8d                         },
+	{ "INIT_GPIO"                         , 0x8E, 1       , 0       , 0       , init_gpio                       },
+	/* INIT_RAM_RESTRICT_ZM_REG_GROUP's mult is loaded by M table in BIT */
+	{ "INIT_RAM_RESTRICT_ZM_REG_GROUP"    , 0x8F, 7       , 6       , 0       , init_ram_restrict_zm_reg_group  },
+	{ "INIT_COPY_ZM_REG"                  , 0x90, 9       , 0       , 0       , init_copy_zm_reg                },
+	{ "INIT_ZM_REG_GROUP_ADDRESS_LATCHED" , 0x91, 6       , 5       , 4       , init_zm_reg_group_addr_latched  },
+	{ "INIT_RESERVED"                     , 0x92, 1       , 0       , 0       , init_reserved                   },
+	{ "INIT_96"                           , 0x96, 17      , 0       , 0       , init_96                         },
+	{ "INIT_97"                           , 0x97, 13      , 0       , 0       , init_97                         },
+	{ "INIT_AUXCH"                        , 0x98, 6       , 5       , 2       , init_auxch                      },
+	{ "INIT_ZM_AUXCH"                     , 0x99, 6       , 5       , 1       , init_zm_auxch                   },
+	{ NULL                                , 0   , 0       , 0       , 0       , NULL                            }
+};
+
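+/*
+ * An entry's length is the fixed "length" from itbl_entry plus a count
+ * byte at "length_offset" scaled by "length_multiplier". E.g. an
+ * INIT_ZM_REG_SEQUENCE (6, 5, 4) op with a count byte of 2 occupies
+ * 6 + 2 * 4 = 14 bytes.
+ */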
+static unsigned int
+get_init_table_entry_length(struct nvbios *bios, unsigned int offset, int i)
+{
+	/* Calculates the length of a given init table entry. */
+	return itbl_entry[i].length +
+		bios->data[offset + itbl_entry[i].length_offset] *
+		itbl_entry[i].length_multiplier;
+}
+
+#define MAX_TABLE_OPS 1000
+
+static int
+parse_init_table(struct nvbios *bios, unsigned int offset,
+		 struct init_exec *iexec)
+{
+	/*
+	 * Parses all commands in an init table.
+	 *
+	 * We start out executing all commands found in the init table. Some
+	 * opcodes may change the status of iexec->execute to SKIP, which will
+	 * cause the following opcodes to perform no operation until the value
+	 * is changed back to EXECUTE.
+	 */
+
+	int count = 0, i;
+	uint8_t id;
+
+	/*
+	 * Loop until INIT_DONE causes us to break out of the loop
+	 * (or until offset > bios length just in case... )
+	 * (and no more than MAX_TABLE_OPS iterations, just in case... )
+	 */
+	while ((offset < bios->length) && (count++ < MAX_TABLE_OPS)) {
+		id = bios->data[offset];
+
+		/* Find matching id in itbl_entry */
+		for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != id); i++)
+			;
+
+		if (itbl_entry[i].name) {
+			BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n",
+				offset, itbl_entry[i].id, itbl_entry[i].name);
+
+			/* execute eventual command handler */
+			if (itbl_entry[i].handler)
+				if (!(*itbl_entry[i].handler)(bios, offset, iexec))
+					break;
+		} else {
+			NV_ERROR(bios->dev,
+				 "0x%04X: Init table command not found: "
+				 "0x%02X\n", offset, id);
+			return -ENOENT;
+		}
+
+		/*
+		 * Add the offset of the current command including all data
+		 * of that command. The offset will then be pointing on the
+		 * next op code.
+		 */
+		offset += get_init_table_entry_length(bios, offset, i);
+	}
+
+	if (offset >= bios->length)
+		NV_WARN(bios->dev,
+			"Offset 0x%04X greater than known bios image length.  "
+			"Corrupt image?\n", offset);
+	if (count >= MAX_TABLE_OPS)
+		NV_WARN(bios->dev,
+			"More than %d opcodes to a table is unlikely, "
+			"is the bios image corrupt?\n", MAX_TABLE_OPS);
+
+	return 0;
+}
+
+static void
+parse_init_tables(struct nvbios *bios)
+{
+	/* Loops and calls parse_init_table() for each present table. */
+
+	int i = 0;
+	uint16_t table;
+	struct init_exec iexec = {true, false};
+
+	if (bios->old_style_init) {
+		if (bios->init_script_tbls_ptr)
+			parse_init_table(bios, bios->init_script_tbls_ptr, &iexec);
+		if (bios->extra_init_script_tbl_ptr)
+			parse_init_table(bios, bios->extra_init_script_tbl_ptr, &iexec);
+
+		return;
+	}
+
+	while ((table = ROM16(bios->data[bios->init_script_tbls_ptr + i]))) {
+		NV_INFO(bios->dev,
+			"Parsing VBIOS init table %d at offset 0x%04X\n",
+			i / 2, table);
+		BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", table);
+
+		parse_init_table(bios, table, &iexec);
+		i += 2;
+	}
+}
+
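+/*
+ * Walk a pixel clock comparison table (records of a 16-bit comparison
+ * clock in 10s of kHz followed by a script pointer, or, pre-BIT, a
+ * script number) and return the script pointer of the first record
+ * whose clock "pxclk" meets or exceeds.
+ */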
+static uint16_t clkcmptable(struct nvbios *bios, uint16_t clktable, int pxclk)
+{
+	int compare_record_len, i = 0;
+	uint16_t compareclk, scriptptr = 0;
+
+	if (bios->major_version < 5) /* pre BIT */
+		compare_record_len = 3;
+	else
+		compare_record_len = 4;
+
+	do {
+		compareclk = ROM16(bios->data[clktable + compare_record_len * i]);
+		if (pxclk >= compareclk * 10) {
+			if (bios->major_version < 5) {
+				uint8_t tmdssub = bios->data[clktable + 2 + compare_record_len * i];
+				scriptptr = ROM16(bios->data[bios->init_script_tbls_ptr + tmdssub * 2]);
+			} else
+				scriptptr = ROM16(bios->data[clktable + 2 + compare_record_len * i]);
+			break;
+		}
+		i++;
+	} while (compareclk);
+
+	return scriptptr;
+}
+
+static void
+run_digital_op_script(struct drm_device *dev, uint16_t scriptptr,
+		      struct dcb_entry *dcbent, int head, bool dl)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->VBIOS;
+	struct init_exec iexec = {true, false};
+
+	NV_TRACE(dev, "0x%04X: Parsing digital output script table\n",
+		 scriptptr);
+	bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_44,
+		       head ? NV_CIO_CRE_44_HEADB : NV_CIO_CRE_44_HEADA);
+	/* note: if dcb entries have been merged, index may be misleading */
+	NVWriteVgaCrtc5758(dev, head, 0, dcbent->index);
+	parse_init_table(bios, scriptptr, &iexec);
+
+	nv04_dfp_bind_head(dev, dcbent, head, dl);
+}
+
+static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->VBIOS;
+	uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & OUTPUT_C ? 1 : 0);
+	uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]);
+
+	if (!bios->fp.xlated_entry || !sub || !scriptofs)
+		return -EINVAL;
+
+	run_digital_op_script(dev, scriptofs, dcbent, head, bios->fp.dual_link);
+
+	if (script == LVDS_PANEL_OFF) {
+		/* off-on delay in ms */
+		msleep(ROM16(bios->data[bios->fp.xlated_entry + 7]));
+	}
+#ifdef __powerpc__
+	/* Powerbook specific quirks */
+	if (script == LVDS_RESET && ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0329))
+		nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
+	if ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0189 || (dev->pci_device & 0xffff) == 0x0329) {
+		if (script == LVDS_PANEL_ON) {
+			bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) | (1 << 31));
+			bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
+		}
+		if (script == LVDS_PANEL_OFF) {
+			bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) & ~(1 << 31));
+			bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
+		}
+	}
+#endif
+
+	return 0;
+}
+
+static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script, int pxclk)
+{
+	/*
+	 * The BIT LVDS table's header has the information to setup the
+	 * necessary registers. Following the standard 4 byte header are:
+	 * A bitmask byte and a dual-link transition pxclk value for use in
+	 * selecting the init script when not using straps; 4 script pointers
+	 * for panel power, selected by output and on/off; and 8 table pointers
+	 * for panel init, the needed one determined by output, and bits in the
+	 * conf byte. These tables are similar to the TMDS tables, consisting
+	 * of a list of pxclks and script pointers.
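+	 *
+	 * Matching the offsets used below, relative to
+	 * lvdsmanufacturerpointer: +4 fallback bitmask, +5 dual-link
+	 * transition pxclk, +7 and +11 the panel on/off script pointers,
+	 * +15 onwards the pixel clock comparison table pointers.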
+	 */
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->VBIOS;
+	unsigned int outputset = (dcbent->or == 4) ? 1 : 0;
+	uint16_t scriptptr = 0, clktable;
+	uint8_t clktableptr = 0;
+
+	/*
+	 * For now we assume version 3.0 table - g80 support will need some
+	 * changes
+	 */
+
+	switch (script) {
+	case LVDS_INIT:
+		return -ENOSYS;
+	case LVDS_BACKLIGHT_ON:
+	case LVDS_PANEL_ON:
+		scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 7 + outputset * 2]);
+		break;
+	case LVDS_BACKLIGHT_OFF:
+	case LVDS_PANEL_OFF:
+		scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 11 + outputset * 2]);
+		break;
+	case LVDS_RESET:
+		if (dcbent->lvdsconf.use_straps_for_mode) {
+			if (bios->fp.dual_link)
+				clktableptr += 2;
+			if (bios->fp.BITbit1)
+				clktableptr++;
+		} else {
+			/* using EDID */
+			uint8_t fallback = bios->data[bios->fp.lvdsmanufacturerpointer + 4];
+			int fallbackcmpval = (dcbent->or == 4) ? 4 : 1;
+
+			if (bios->fp.dual_link) {
+				clktableptr += 2;
+				fallbackcmpval *= 2;
+			}
+			if (fallbackcmpval & fallback)
+				clktableptr++;
+		}
+
+		/* adding outputset * 8 may not be correct */
+		clktable = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 15 + clktableptr * 2 + outputset * 8]);
+		if (!clktable) {
+			NV_ERROR(dev, "Pixel clock comparison table not found\n");
+			return -ENOENT;
+		}
+		scriptptr = clkcmptable(bios, clktable, pxclk);
+	}
+
+	if (!scriptptr) {
+		NV_ERROR(dev, "LVDS output init script not found\n");
+		return -ENOENT;
+	}
+	run_digital_op_script(dev, scriptptr, dcbent, head, bios->fp.dual_link);
+
+	return 0;
+}
+
+int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script, int pxclk)
+{
+	/*
+	 * LVDS operations are multiplexed in an effort to present a single API
+	 * which works with two vastly differing underlying structures.
+	 * This acts as the demux
+	 */
+
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->VBIOS;
+	uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
+	uint32_t sel_clk_binding, sel_clk;
+	int ret;
+
+	if (bios->fp.last_script_invoc == (script << 1 | head) || !lvds_ver ||
+	    (lvds_ver >= 0x30 && script == LVDS_INIT))
+		return 0;
+
+	if (!bios->fp.lvds_init_run) {
+		bios->fp.lvds_init_run = true;
+		call_lvds_script(dev, dcbent, head, LVDS_INIT, pxclk);
+	}
+
+	if (script == LVDS_PANEL_ON && bios->fp.reset_after_pclk_change)
+		call_lvds_script(dev, dcbent, head, LVDS_RESET, pxclk);
+	if (script == LVDS_RESET && bios->fp.power_off_for_reset)
+		call_lvds_script(dev, dcbent, head, LVDS_PANEL_OFF, pxclk);
+
+	NV_TRACE(dev, "Calling LVDS script %d:\n", script);
+
+	/* don't let script change pll->head binding */
+	sel_clk_binding = bios_rd32(bios, NV_PRAMDAC_SEL_CLK) & 0x50000;
+
+	if (lvds_ver < 0x30)
+		ret = call_lvds_manufacturer_script(dev, dcbent, head, script);
+	else
+		ret = run_lvds_table(dev, dcbent, head, script, pxclk);
+
+	bios->fp.last_script_invoc = (script << 1 | head);
+
+	sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
+	/* some scripts set a value in NV_PBUS_POWERCTRL_2 and break video overlay */
+	nvWriteMC(dev, NV_PBUS_POWERCTRL_2, 0);
+
+	return ret;
+}
+
+struct lvdstableheader {
+	uint8_t lvds_ver, headerlen, recordlen;
+};
+
+static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct nvbios *bios, struct lvdstableheader *lth)
+{
+	/*
+	 * BMP version (0xa) LVDS table has a simple header of version and
+	 * record length. The BIT LVDS table has the typical BIT table header:
+	 * version byte, header length byte, record length byte, and a byte for
+	 * the maximum number of records that can be held in the table.
+	 */
+
+	uint8_t lvds_ver, headerlen, recordlen;
+
+	memset(lth, 0, sizeof(struct lvdstableheader));
+
+	if (bios->fp.lvdsmanufacturerpointer == 0x0) {
+		NV_ERROR(dev, "Pointer to LVDS manufacturer table invalid\n");
+		return -EINVAL;
+	}
+
+	lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
+
+	switch (lvds_ver) {
+	case 0x0a:	/* pre NV40 */
+		headerlen = 2;
+		recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
+		break;
+	case 0x30:	/* NV4x */
+		headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
+		if (headerlen < 0x1f) {
+			NV_ERROR(dev, "LVDS table header not understood\n");
+			return -EINVAL;
+		}
+		recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2];
+		break;
+	case 0x40:	/* G80/G90 */
+		headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
+		if (headerlen < 0x7) {
+			NV_ERROR(dev, "LVDS table header not understood\n");
+			return -EINVAL;
+		}
+		recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2];
+		break;
+	default:
+		NV_ERROR(dev,
+			 "LVDS table revision %d.%d not currently supported\n",
+			 lvds_ver >> 4, lvds_ver & 0xf);
+		return -ENOSYS;
+	}
+
+	lth->lvds_ver = lvds_ver;
+	lth->headerlen = headerlen;
+	lth->recordlen = recordlen;
+
+	return 0;
+}
+
+static int
+get_fp_strap(struct drm_device *dev, struct nvbios *bios)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	/*
+	 * The fp strap is normally dictated by the "User Strap" in
+	 * PEXTDEV_BOOT_0[20:16]. On BMP cards, however, when bit 2 of the
+	 * Internal_Flags struct at 0x48 is set, the user strap gets
+	 * overridden by the PCI subsystem ID during POST. This happens only
+	 * after the previous user strap has been committed to CR58 for
+	 * CR57=0xf on head A, so that value may be read and used instead.
+	 */
+
+	if (bios->major_version < 5 && bios->data[0x48] & 0x4)
+		return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
+
+	if (dev_priv->card_type >= NV_50)
+		return (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
+	else
+		return (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
+}
+
+static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
+{
+	uint8_t *fptable;
+	uint8_t fptable_ver, headerlen = 0, recordlen, fpentries = 0xf, fpindex;
+	int ret, ofs, fpstrapping;
+	struct lvdstableheader lth;
+
+	if (bios->fp.fptablepointer == 0x0) {
+		/* Apple cards don't have the fp table; the laptops use DDC */
+		/* The table is also missing on some x86 IGPs */
+#ifndef __powerpc__
+		NV_ERROR(dev, "Pointer to flat panel table invalid\n");
+#endif
+		bios->pub.digital_min_front_porch = 0x4b;
+		return 0;
+	}
+
+	fptable = &bios->data[bios->fp.fptablepointer];
+	fptable_ver = fptable[0];
+
+	switch (fptable_ver) {
+	/*
+	 * BMP version 0x5.0x11 BIOSen have version 1 like tables, but no
+	 * version field, and miss one of the spread spectrum/PWM bytes.
+	 * This could affect early GF2Go parts (not seen any appropriate ROMs
+	 * though). Here we assume that a version of 0x05 matches this case
+	 * (combining with a BMP version check would be better), as the
+	 * common case for the panel type field is 0x0005, and that is in
+	 * fact what we are reading the first byte of.
+	 */
+	case 0x05:	/* some NV10, 11, 15, 16 */
+		recordlen = 42;
+		ofs = -1;
+		break;
+	case 0x10:	/* some NV15/16, and NV11+ */
+		recordlen = 44;
+		ofs = 0;
+		break;
+	case 0x20:	/* NV40+ */
+		headerlen = fptable[1];
+		recordlen = fptable[2];
+		fpentries = fptable[3];
+		/*
+		 * fptable[4] is the minimum
+		 * RAMDAC_FP_HCRTC -> RAMDAC_FP_HSYNC_START gap
+		 */
+		bios->pub.digital_min_front_porch = fptable[4];
+		ofs = -7;
+		break;
+	default:
+		NV_ERROR(dev,
+			 "FP table revision %d.%d not currently supported\n",
+			 fptable_ver >> 4, fptable_ver & 0xf);
+		return -ENOSYS;
+	}
+
+	if (!bios->is_mobile) /* !mobile only needs digital_min_front_porch */
+		return 0;
+
+	ret = parse_lvds_manufacturer_table_header(dev, bios, &lth);
+	if (ret)
+		return ret;
+
+	if (lth.lvds_ver == 0x30 || lth.lvds_ver == 0x40) {
+		bios->fp.fpxlatetableptr = bios->fp.lvdsmanufacturerpointer +
+							lth.headerlen + 1;
+		bios->fp.xlatwidth = lth.recordlen;
+	}
+	if (bios->fp.fpxlatetableptr == 0x0) {
+		NV_ERROR(dev, "Pointer to flat panel xlat table invalid\n");
+		return -EINVAL;
+	}
+
+	fpstrapping = get_fp_strap(dev, bios);
+
+	fpindex = bios->data[bios->fp.fpxlatetableptr +
+					fpstrapping * bios->fp.xlatwidth];
+
+	if (fpindex > fpentries) {
+		NV_ERROR(dev, "Bad flat panel table index\n");
+		return -ENOENT;
+	}
+
+	/* nv4x cards need both a strap value and fpindex of 0xf to use DDC */
+	if (lth.lvds_ver > 0x10)
+		bios->pub.fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf;
+
+	/*
+	 * If either the strap or the xlated fpindex value is 0xf, there is
+	 * no panel using a strap-derived bios mode present.  This condition
+	 * includes, but is different from, the DDC panel indicator above
+	 */
+	if (fpstrapping == 0xf || fpindex == 0xf)
+		return 0;
+
+	bios->fp.mode_ptr = bios->fp.fptablepointer + headerlen +
+			    recordlen * fpindex + ofs;
+
+	NV_TRACE(dev, "BIOS FP mode: %dx%d (%dkHz pixel clock)\n",
+		 ROM16(bios->data[bios->fp.mode_ptr + 11]) + 1,
+		 ROM16(bios->data[bios->fp.mode_ptr + 25]) + 1,
+		 ROM16(bios->data[bios->fp.mode_ptr + 7]) * 10);
+
+	return 0;
+}
+
+bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->VBIOS;
+	uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr];
+
+	if (!mode)	/* just checking whether we can produce a mode */
+		return bios->fp.mode_ptr;
+
+	memset(mode, 0, sizeof(struct drm_display_mode));
+	/*
+	 * For version 1.0 (version in byte 0):
+	 * bytes 1-2 are "panel type", including bits on whether Colour/mono,
+	 * single/dual link, and type (TFT etc.)
+	 * bytes 3-6 are bits per colour in RGBX
+	 */
+	mode->clock = ROM16(mode_entry[7]) * 10;
+	/* bytes 9-10 is HActive */
+	mode->hdisplay = ROM16(mode_entry[11]) + 1;
+	/*
+	 * bytes 13-14 is HValid Start
+	 * bytes 15-16 is HValid End
+	 */
+	mode->hsync_start = ROM16(mode_entry[17]) + 1;
+	mode->hsync_end = ROM16(mode_entry[19]) + 1;
+	mode->htotal = ROM16(mode_entry[21]) + 1;
+	/* bytes 23-24, 27-30 similarly, but vertical */
+	mode->vdisplay = ROM16(mode_entry[25]) + 1;
+	mode->vsync_start = ROM16(mode_entry[31]) + 1;
+	mode->vsync_end = ROM16(mode_entry[33]) + 1;
+	mode->vtotal = ROM16(mode_entry[35]) + 1;
+	mode->flags |= (mode_entry[37] & 0x10) ?
+			DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+	mode->flags |= (mode_entry[37] & 0x1) ?
+			DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+	/*
+	 * bytes 38-39 relate to spread spectrum settings
+	 * bytes 40-43 are something to do with PWM
+	 */
+
+	mode->status = MODE_OK;
+	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+	drm_mode_set_name(mode);
+	return bios->fp.mode_ptr;
+}
+
+int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, bool *if_is_24bit)
+{
+	/*
+	 * The LVDS table header is (mostly) described in
+	 * parse_lvds_manufacturer_table_header(): the BIT header additionally
+	 * contains the dual-link transition pxclk (in 10s of kHz) at byte 5; if
+	 * straps are not being used for the panel, this specifies the frequency
+	 * at which modes should be set up in the dual link style.
+	 *
+	 * Following the header, the BMP (ver 0xa) table has several records,
+	 * indexed by a separate xlat table, indexed in turn by the fp strap
+	 * in EXTDEV_BOOT. Each record has a config byte, followed by 6
+	 * script numbers for use by INIT_SUB which control panel init and
+	 * power,
+	 * and finally a dword of ms to sleep between power off and on
+	 * operations.
+	 *
+	 * In the BIT versions, the table following the header serves as an
+	 * integrated config and xlat table: the records in the table are
+	 * indexed by the FP strap nibble in EXTDEV_BOOT, and each record has
+	 * two bytes - the first as a config byte, the second for indexing the
+	 * fp mode table pointed to by the BIT 'D' table
+	 *
+	 * DDC is not used until after card init, so selecting the correct
+	 * table entry and setting the dual link flag for EDID equipped
+	 * panels (which requires tests against the native-mode pixel clock)
+	 * cannot be done until later, when this function should be called
+	 * with a non-zero pxclk
+	 */
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->VBIOS;
+	int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0;
+	struct lvdstableheader lth;
+	uint16_t lvdsofs;
+	int ret, chip_version = bios->pub.chip_version;
+
+	ret = parse_lvds_manufacturer_table_header(dev, bios, &lth);
+	if (ret)
+		return ret;
+
+	switch (lth.lvds_ver) {
+	case 0x0a:	/* pre NV40 */
+		lvdsmanufacturerindex = bios->data[
+					bios->fp.fpxlatemanufacturertableptr +
+					fpstrapping];
+
+		/* we're done if this isn't the EDID panel case */
+		if (!pxclk)
+			break;
+
+		if (chip_version < 0x25) {
+			/* nv17 behaviour
+			 *
+			 * It seems the old style lvds script pointer is reused
+			 * to select 18/24 bit colour depth for EDID panels.
+			 */
+			lvdsmanufacturerindex =
+				(bios->legacy.lvds_single_a_script_ptr & 1) ?
+									2 : 0;
+			if (pxclk >= bios->fp.duallink_transition_clk)
+				lvdsmanufacturerindex++;
+		} else if (chip_version < 0x30) {
+			/* nv28 behaviour (off-chip encoder)
+			 *
+			 * nv28 does a complex dance of first using byte 121 of
+			 * the EDID to choose the lvdsmanufacturerindex, then
+			 * later attempting to match the EDID manufacturer and
+			 * product IDs in a table (signature 'pidt' (panel id
+			 * table?)), setting an lvdsmanufacturerindex of 0 and
+			 * an fp strap of the match index (or 0xf if none)
+			 */
+			lvdsmanufacturerindex = 0;
+		} else {
+			/* nv31, nv34 behaviour */
+			lvdsmanufacturerindex = 0;
+			if (pxclk >= bios->fp.duallink_transition_clk)
+				lvdsmanufacturerindex = 2;
+			if (pxclk >= 140000)
+				lvdsmanufacturerindex = 3;
+		}
+
+		/*
+		 * nvidia set the high nibble of (cr57=f, cr58) to
+		 * lvdsmanufacturerindex in this case; we don't
+		 */
+		break;
+	case 0x30:	/* NV4x */
+	case 0x40:	/* G80/G90 */
+		lvdsmanufacturerindex = fpstrapping;
+		break;
+	default:
+		NV_ERROR(dev, "LVDS table revision not currently supported\n");
+		return -ENOSYS;
+	}
+
+	lvdsofs = bios->fp.xlated_entry = bios->fp.lvdsmanufacturerpointer +
+		lth.headerlen + lth.recordlen * lvdsmanufacturerindex;
+	switch (lth.lvds_ver) {
+	case 0x0a:
+		bios->fp.power_off_for_reset = bios->data[lvdsofs] & 1;
+		bios->fp.reset_after_pclk_change = bios->data[lvdsofs] & 2;
+		bios->fp.dual_link = bios->data[lvdsofs] & 4;
+		bios->fp.link_c_increment = bios->data[lvdsofs] & 8;
+		*if_is_24bit = bios->data[lvdsofs] & 16;
+		break;
+	case 0x30:
+		/*
+		 * My money would be on there being a 24 bit interface bit in
+		 * this table, but I have no example of a laptop bios with a
+		 * 24 bit panel to confirm that. Hence we shout loudly if any
+		 * bit other than bit 0 is set (I've not even seen bit 1)
+		 */
+		if (bios->data[lvdsofs] > 1)
+			NV_ERROR(dev,
+				 "You have a very unusual laptop display; please report it\n");
+		/*
+		 * No sign of the "power off for reset" or "reset for panel
+		 * on" bits, but it's safer to assume we should
+		 */
+		bios->fp.power_off_for_reset = true;
+		bios->fp.reset_after_pclk_change = true;
+		/*
+		 * It's ok that lvdsofs is wrong for the nv4x EDID case;
+		 * dual_link is overwritten, and BITbit1 isn't used.
+		 */
+		bios->fp.dual_link = bios->data[lvdsofs] & 1;
+		bios->fp.BITbit1 = bios->data[lvdsofs] & 2;
+		bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10;
+		break;
+	case 0x40:
+		bios->fp.dual_link = bios->data[lvdsofs] & 1;
+		bios->fp.if_is_24bit = bios->data[lvdsofs] & 2;
+		bios->fp.strapless_is_24bit = bios->data[bios->fp.lvdsmanufacturerpointer + 4];
+		bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10;
+		break;
+	}
+
+	/* set dual_link flag for EDID case */
+	if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
+		bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk);
+
+	*dl = bios->fp.dual_link;
+
+	return 0;
+}
+
+static uint8_t *
+bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent,
+			 uint16_t record, int record_len, int record_nr)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->VBIOS;
+	uint32_t entry;
+	uint16_t table;
+	int i, v;
+
+	for (i = 0; i < record_nr; i++, record += record_len) {
+		table = ROM16(bios->data[record]);
+		if (!table)
+			continue;
+		entry = ROM32(bios->data[table]);
+
+		v = (entry & 0x000f0000) >> 16;
+		if (!(v & dcbent->or))
+			continue;
+
+		v = (entry & 0x000000f0) >> 4;
+		if (v != dcbent->location)
+			continue;
+
+		v = (entry & 0x0000000f);
+		if (v != dcbent->type)
+			continue;
+
+		return &bios->data[table];
+	}
+
+	return NULL;
+}
+
+void *
+nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
+		      int *length)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->VBIOS;
+	uint8_t *table;
+
+	if (!bios->display.dp_table_ptr) {
+		NV_ERROR(dev, "No pointer to DisplayPort table\n");
+		return NULL;
+	}
+	table = &bios->data[bios->display.dp_table_ptr];
+
+	if (table[0] != 0x21) {
+		NV_ERROR(dev, "DisplayPort table version 0x%02x unknown\n",
+			 table[0]);
+		return NULL;
+	}
+
+	*length = table[4];
+	return bios_output_config_match(dev, dcbent,
+					bios->display.dp_table_ptr + table[1],
+					table[2], table[3]);
+}
+
+int
+nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
+			       uint32_t sub, int pxclk)
+{
+	/*
+	 * The display script table is located by the BIT 'U' table.
+	 *
+	 * It contains an array of pointers to various tables describing
+	 * a particular output type.  The first 32-bits of the output
+	 * tables contains similar information to a DCB entry, and is
+	 * used to decide whether that particular table is suitable for
+	 * the output you want to access.
+	 *
+	 * The "record header length" field here seems to indicate the
+	 * offset of the first configuration entry in the output tables.
+	 * This is 10 on most cards I've seen, but 12 has been witnessed
+	 * on DP cards, and there's another script pointer within the
+	 * header.
+	 *
+	 * offset + 0   ( 8 bits): version
+	 * offset + 1   ( 8 bits): header length
+	 * offset + 2   ( 8 bits): record length
+	 * offset + 3   ( 8 bits): number of records
+	 * offset + 4   ( 8 bits): record header length
+	 * offset + 5   (16 bits): pointer to first output script table
+	 */
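+	/*
+	 * Illustrative example (hypothetical bytes): a table beginning
+	 * 20 06 02 09 0a would be version 0x20 with a 6 byte header,
+	 * 2 byte records, 9 records, and configuration entries starting
+	 * 10 bytes into each output script table.
+	 */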
+
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct init_exec iexec = {true, false};
+	struct nvbios *bios = &dev_priv->VBIOS;
+	uint8_t *table = &bios->data[bios->display.script_table_ptr];
+	uint8_t *otable = NULL;
+	uint16_t script;
+	int i = 0;
+
+	if (!bios->display.script_table_ptr) {
+		NV_ERROR(dev, "No pointer to output script table\n");
+		return 1;
+	}
+
+	/*
+	 * Nothing useful has been found in any of the pre-2.0 tables I've
+	 * seen, so until something useful turns up in one, we really don't
+	 * need to care.
+	 */
+	if (table[0] < 0x20)
+		return 1;
+
+	if (table[0] != 0x20 && table[0] != 0x21) {
+		NV_ERROR(dev, "Output script table version 0x%02x unknown\n",
+			 table[0]);
+		return 1;
+	}
+
+	/*
+	 * The output script tables describing a particular output type
+	 * look as follows:
+	 *
+	 * offset + 0   (32 bits): output this table matches (hash of DCB)
+	 * offset + 4   ( 8 bits): unknown
+	 * offset + 5   ( 8 bits): number of configurations
+	 * offset + 6   (16 bits): pointer to some script
+	 * offset + 8   (16 bits): pointer to some script
+	 *
+	 * headerlen == 10
+	 * offset + 10           : configuration 0
+	 *
+	 * headerlen == 12
+	 * offset + 10           : pointer to some script
+	 * offset + 12           : configuration 0
+	 *
+	 * Each config entry is as follows:
+	 *
+	 * offset + 0   (16 bits): unknown, assumed to be a match value
+	 * offset + 2   (16 bits): pointer to script table (clock set?)
+	 * offset + 4   (16 bits): pointer to script table (reset?)
+	 *
+	 * There doesn't appear to be a count value to say how many
+	 * entries exist in each script table, instead, a 0 value in
+	 * the first 16-bit word seems to indicate both the end of the
+	 * list and the default entry.  The second 16-bit word in the
+	 * script tables is a pointer to the script to execute.
+	 */
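+	/*
+	 * The pxclk argument selects which script gets run below: 0 runs
+	 * output script 0, -1 output script 1 and -2 output script 2
+	 * (header permitting), while a positive pxclk runs clock script 0
+	 * and any other negative pxclk runs clock script 1, in both cases
+	 * after matching |pxclk| against the clock comparison table.
+	 */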
+
+	NV_DEBUG(dev, "Searching for output entry for %d %d %d\n",
+			dcbent->type, dcbent->location, dcbent->or);
+	otable = bios_output_config_match(dev, dcbent, table[1] +
+					  bios->display.script_table_ptr,
+					  table[2], table[3]);
+	if (!otable) {
+		NV_ERROR(dev, "Couldn't find matching output script table\n");
+		return 1;
+	}
+
+	if (pxclk < -2 || pxclk > 0) {
+		/* Try to find matching script table entry */
+		for (i = 0; i < otable[5]; i++) {
+			if (ROM16(otable[table[4] + i*6]) == sub)
+				break;
+		}
+
+		if (i == otable[5]) {
+			NV_ERROR(dev, "Table 0x%04x not found for %d/%d, "
+				      "using first\n",
+				 sub, dcbent->type, dcbent->or);
+			i = 0;
+		}
+	}
+
+	bios->display.output = dcbent;
+
+	if (pxclk == 0) {
+		script = ROM16(otable[6]);
+		if (!script) {
+			NV_DEBUG(dev, "output script 0 not found\n");
+			return 1;
+		}
+
+		NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
+		parse_init_table(bios, script, &iexec);
+	} else
+	if (pxclk == -1) {
+		script = ROM16(otable[8]);
+		if (!script) {
+			NV_DEBUG(dev, "output script 1 not found\n");
+			return 1;
+		}
+
+		NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
+		parse_init_table(bios, script, &iexec);
+	} else
+	if (pxclk == -2) {
+		if (table[4] >= 12)
+			script = ROM16(otable[10]);
+		else
+			script = 0;
+		if (!script) {
+			NV_DEBUG(dev, "output script 2 not found\n");
+			return 1;
+		}
+
+		NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
+		parse_init_table(bios, script, &iexec);
+	} else
+	if (pxclk > 0) {
+		script = ROM16(otable[table[4] + i*6 + 2]);
+		if (script)
+			script = clkcmptable(bios, script, pxclk);
+		if (!script) {
+			NV_ERROR(dev, "clock script 0 not found\n");
+			return 1;
+		}
+
+		NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
+		parse_init_table(bios, script, &iexec);
+	} else
+	if (pxclk < 0) {
+		script = ROM16(otable[table[4] + i*6 + 4]);
+		if (script)
+			script = clkcmptable(bios, script, -pxclk);
+		if (!script) {
+			NV_DEBUG(dev, "clock script 1 not found\n");
+			return 1;
+		}
+
+		NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
+		parse_init_table(bios, script, &iexec);
+	}
+
+	return 0;
+}
+
+
+int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, int pxclk)
+{
+	/*
+	 * the pxclk parameter is in kHz
+	 *
+	 * This runs the TMDS regs setting code found on BIT bios cards
+	 *
+	 * For ffs(or) == 1 use the first table, for ffs(or) == 2 and
+	 * ffs(or) == 3, use the second.
+	 */
+
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->VBIOS;
+	int cv = bios->pub.chip_version;
+	uint16_t clktable = 0, scriptptr;
+	uint32_t sel_clk_binding, sel_clk;
+
+	/* pre-nv17 off-chip tmds uses scripts, post nv17 doesn't */
+	if (cv >= 0x17 && cv != 0x1a && cv != 0x20 &&
+	    dcbent->location != DCB_LOC_ON_CHIP)
+		return 0;
+
+	switch (ffs(dcbent->or)) {
+	case 1:
+		clktable = bios->tmds.output0_script_ptr;
+		break;
+	case 2:
+	case 3:
+		clktable = bios->tmds.output1_script_ptr;
+		break;
+	}
+
+	if (!clktable) {
+		NV_ERROR(dev, "Pixel clock comparison table not found\n");
+		return -EINVAL;
+	}
+
+	scriptptr = clkcmptable(bios, clktable, pxclk);
+
+	if (!scriptptr) {
+		NV_ERROR(dev, "TMDS output init script not found\n");
+		return -ENOENT;
+	}
+
+	/* don't let script change pll->head binding */
+	sel_clk_binding = bios_rd32(bios, NV_PRAMDAC_SEL_CLK) & 0x50000;
+	run_digital_op_script(dev, scriptptr, dcbent, head, pxclk >= 165000);
+	sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
+
+	return 0;
+}
+
+int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims *pll_lim)
+{
+	/*
+	 * PLL limits table
+	 *
+	 * Version 0x10: NV30, NV31
+	 * One byte header (version), one record of 24 bytes
+	 * Version 0x11: NV36 - Not implemented
+	 * Seems to have the same record style as 0x10, but 3 records rather than 1
+	 * Version 0x20: Found on Geforce 6 cards
+	 * Trivial 4 byte BIT header. 31 (0x1f) byte record length
+	 * Version 0x21: Found on Geforce 7, 8 and some Geforce 6 cards
+	 * 5 byte header, fifth byte of unknown purpose. 35 (0x23) byte record
+	 * length in general, some (integrated) have an extra configuration byte
+	 * Version 0x30: Found on Geforce 8, separates the register mapping
+	 * from the limits tables.
+	 */
+
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->VBIOS;
+	int cv = bios->pub.chip_version, pllindex = 0;
+	uint8_t pll_lim_ver = 0, headerlen = 0, recordlen = 0, entries = 0;
+	uint32_t crystal_strap_mask, crystal_straps;
+
+	if (!bios->pll_limit_tbl_ptr) {
+		if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
+		    cv >= 0x40) {
+			NV_ERROR(dev, "Pointer to PLL limits table invalid\n");
+			return -EINVAL;
+		}
+	} else
+		pll_lim_ver = bios->data[bios->pll_limit_tbl_ptr];
+
+	crystal_strap_mask = 1 << 6;
+	/* open coded dev->twoHeads test */
+	if (cv > 0x10 && cv != 0x15 && cv != 0x1a && cv != 0x20)
+		crystal_strap_mask |= 1 << 22;
+	crystal_straps = nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) &
+							crystal_strap_mask;
+
+	switch (pll_lim_ver) {
+	/*
+	 * We use version 0 to indicate a pre-limit-table BIOS (single stage
+	 * PLL) and load the hard-coded limits instead.
+	 */
+	case 0:
+		break;
+	case 0x10:
+	case 0x11:
+		/*
+		 * Strictly v0x11 has 3 entries, but the last two don't seem
+		 * to get used.
+		 */
+		headerlen = 1;
+		recordlen = 0x18;
+		entries = 1;
+		pllindex = 0;
+		break;
+	case 0x20:
+	case 0x21:
+	case 0x30:
+	case 0x40:
+		headerlen = bios->data[bios->pll_limit_tbl_ptr + 1];
+		recordlen = bios->data[bios->pll_limit_tbl_ptr + 2];
+		entries = bios->data[bios->pll_limit_tbl_ptr + 3];
+		break;
+	default:
+		NV_ERROR(dev, "PLL limits table revision 0x%X not currently "
+				"supported\n", pll_lim_ver);
+		return -ENOSYS;
+	}
+
+	/* initialize all members to zero */
+	memset(pll_lim, 0, sizeof(struct pll_lims));
+
+	if (pll_lim_ver == 0x10 || pll_lim_ver == 0x11) {
+		uint8_t *pll_rec = &bios->data[bios->pll_limit_tbl_ptr + headerlen + recordlen * pllindex];
+
+		pll_lim->vco1.minfreq = ROM32(pll_rec[0]);
+		pll_lim->vco1.maxfreq = ROM32(pll_rec[4]);
+		pll_lim->vco2.minfreq = ROM32(pll_rec[8]);
+		pll_lim->vco2.maxfreq = ROM32(pll_rec[12]);
+		pll_lim->vco1.min_inputfreq = ROM32(pll_rec[16]);
+		pll_lim->vco2.min_inputfreq = ROM32(pll_rec[20]);
+		pll_lim->vco1.max_inputfreq = pll_lim->vco2.max_inputfreq = INT_MAX;
+
+		/* these values taken from nv30/31/36 */
+		pll_lim->vco1.min_n = 0x1;
+		if (cv == 0x36)
+			pll_lim->vco1.min_n = 0x5;
+		pll_lim->vco1.max_n = 0xff;
+		pll_lim->vco1.min_m = 0x1;
+		pll_lim->vco1.max_m = 0xd;
+		pll_lim->vco2.min_n = 0x4;
+		/*
+		 * On nv30, 31, 36 (i.e. all cards with two stage PLLs with this
+		 * table version (apart from nv35)), N2 is compared against both
+		 * maxN2 (0x46) and 10 * maxM2 (10 * 0x4 = 0x28), so setting
+		 * maxN2 to 0x28 saves a comparison.
+		 */
+		pll_lim->vco2.max_n = 0x28;
+		if (cv == 0x30 || cv == 0x35)
+			/* only 5 bits available for N2 on nv30/35 */
+			pll_lim->vco2.max_n = 0x1f;
+		pll_lim->vco2.min_m = 0x1;
+		pll_lim->vco2.max_m = 0x4;
+		pll_lim->max_log2p = 0x7;
+		pll_lim->max_usable_log2p = 0x6;
+	} else if (pll_lim_ver == 0x20 || pll_lim_ver == 0x21) {
+		uint16_t plloffs = bios->pll_limit_tbl_ptr + headerlen;
+		uint32_t reg = 0; /* default match */
+		uint8_t *pll_rec;
+		int i;
+
+		/*
+		 * The first entry is the default match, used if nothing better
+		 * is found; warn if its reg field is nonzero.
+		 */
+		if (ROM32(bios->data[plloffs]))
+			NV_WARN(dev, "Default PLL limit entry has non-zero "
+				       "register field\n");
+
+		if (limit_match > MAX_PLL_TYPES)
+			/* we've been passed a reg as the match */
+			reg = limit_match;
+		else /* limit match is a pll type */
+			for (i = 1; i < entries && !reg; i++) {
+				uint32_t cmpreg = ROM32(bios->data[plloffs + recordlen * i]);
+
+				if (limit_match == NVPLL &&
+				    (cmpreg == NV_PRAMDAC_NVPLL_COEFF || cmpreg == 0x4000))
+					reg = cmpreg;
+				if (limit_match == MPLL &&
+				    (cmpreg == NV_PRAMDAC_MPLL_COEFF || cmpreg == 0x4020))
+					reg = cmpreg;
+				if (limit_match == VPLL1 &&
+				    (cmpreg == NV_PRAMDAC_VPLL_COEFF || cmpreg == 0x4010))
+					reg = cmpreg;
+				if (limit_match == VPLL2 &&
+				    (cmpreg == NV_RAMDAC_VPLL2 || cmpreg == 0x4018))
+					reg = cmpreg;
+			}
+
+		for (i = 1; i < entries; i++)
+			if (ROM32(bios->data[plloffs + recordlen * i]) == reg) {
+				pllindex = i;
+				break;
+			}
+
+		pll_rec = &bios->data[plloffs + recordlen * pllindex];
+
+		BIOSLOG(bios, "Loading PLL limits for reg 0x%08x\n",
+			pllindex ? reg : 0);
+
+		/*
+		 * Frequencies are stored in tables in MHz, kHz are more
+		 * useful, so we convert.
+		 */
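+		/* e.g. a stored value of 250 (MHz) becomes 250000 (kHz) */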
+
+		/* What output frequencies can each VCO generate? */
+		pll_lim->vco1.minfreq = ROM16(pll_rec[4]) * 1000;
+		pll_lim->vco1.maxfreq = ROM16(pll_rec[6]) * 1000;
+		pll_lim->vco2.minfreq = ROM16(pll_rec[8]) * 1000;
+		pll_lim->vco2.maxfreq = ROM16(pll_rec[10]) * 1000;
+
+		/* What input frequencies they accept (past the m-divider)? */
+		pll_lim->vco1.min_inputfreq = ROM16(pll_rec[12]) * 1000;
+		pll_lim->vco2.min_inputfreq = ROM16(pll_rec[14]) * 1000;
+		pll_lim->vco1.max_inputfreq = ROM16(pll_rec[16]) * 1000;
+		pll_lim->vco2.max_inputfreq = ROM16(pll_rec[18]) * 1000;
+
+		/* What values are accepted as multiplier and divider? */
+		pll_lim->vco1.min_n = pll_rec[20];
+		pll_lim->vco1.max_n = pll_rec[21];
+		pll_lim->vco1.min_m = pll_rec[22];
+		pll_lim->vco1.max_m = pll_rec[23];
+		pll_lim->vco2.min_n = pll_rec[24];
+		pll_lim->vco2.max_n = pll_rec[25];
+		pll_lim->vco2.min_m = pll_rec[26];
+		pll_lim->vco2.max_m = pll_rec[27];
+
+		pll_lim->max_usable_log2p = pll_lim->max_log2p = pll_rec[29];
+		if (pll_lim->max_log2p > 0x7)
+			/* pll decoding in nv_hw.c assumes never > 7 */
+			NV_WARN(dev, "Max log2 P value greater than 7 (%d)\n",
+				pll_lim->max_log2p);
+		if (cv < 0x60)
+			pll_lim->max_usable_log2p = 0x6;
+		pll_lim->log2p_bias = pll_rec[30];
+
+		if (recordlen > 0x22)
+			pll_lim->refclk = ROM32(pll_rec[31]);
+
+		if (recordlen > 0x23 && pll_rec[35])
+			NV_WARN(dev,
+				"Bits set in PLL configuration byte (%x)\n",
+				pll_rec[35]);
+
+		/* C51 special case, not seen elsewhere */
+		if (cv == 0x51 && !pll_lim->refclk) {
+			uint32_t sel_clk = bios_rd32(bios, NV_PRAMDAC_SEL_CLK);
+
+			if (((limit_match == NV_PRAMDAC_VPLL_COEFF || limit_match == VPLL1) && sel_clk & 0x20) ||
+			    ((limit_match == NV_RAMDAC_VPLL2 || limit_match == VPLL2) && sel_clk & 0x80)) {
+				if (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_CHIP_ID_INDEX) < 0xa3)
+					pll_lim->refclk = 200000;
+				else
+					pll_lim->refclk = 25000;
+			}
+		}
+	} else if (pll_lim_ver == 0x30) { /* ver 0x30 */
+		uint8_t *entry = &bios->data[bios->pll_limit_tbl_ptr + headerlen];
+		uint8_t *record = NULL;
+		int i;
+
+		BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
+			limit_match);
+
+		for (i = 0; i < entries; i++, entry += recordlen) {
+			if (ROM32(entry[3]) == limit_match) {
+				record = &bios->data[ROM16(entry[1])];
+				break;
+			}
+		}
+
+		if (!record) {
+			NV_ERROR(dev, "Register 0x%08x not found in PLL "
+				 "limits table", limit_match);
+			return -ENOENT;
+		}
+
+		pll_lim->vco1.minfreq = ROM16(record[0]) * 1000;
+		pll_lim->vco1.maxfreq = ROM16(record[2]) * 1000;
+		pll_lim->vco2.minfreq = ROM16(record[4]) * 1000;
+		pll_lim->vco2.maxfreq = ROM16(record[6]) * 1000;
+		pll_lim->vco1.min_inputfreq = ROM16(record[8]) * 1000;
+		pll_lim->vco2.min_inputfreq = ROM16(record[10]) * 1000;
+		pll_lim->vco1.max_inputfreq = ROM16(record[12]) * 1000;
+		pll_lim->vco2.max_inputfreq = ROM16(record[14]) * 1000;
+		pll_lim->vco1.min_n = record[16];
+		pll_lim->vco1.max_n = record[17];
+		pll_lim->vco1.min_m = record[18];
+		pll_lim->vco1.max_m = record[19];
+		pll_lim->vco2.min_n = record[20];
+		pll_lim->vco2.max_n = record[21];
+		pll_lim->vco2.min_m = record[22];
+		pll_lim->vco2.max_m = record[23];
+		pll_lim->max_usable_log2p = pll_lim->max_log2p = record[25];
+		pll_lim->log2p_bias = record[27];
+		pll_lim->refclk = ROM32(record[28]);
+	} else if (pll_lim_ver) { /* ver 0x40 */
+		uint8_t *entry = &bios->data[bios->pll_limit_tbl_ptr + headerlen];
+		uint8_t *record = NULL;
+		int i;
+
+		BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
+			limit_match);
+
+		for (i = 0; i < entries; i++, entry += recordlen) {
+			if (ROM32(entry[3]) == limit_match) {
+				record = &bios->data[ROM16(entry[1])];
+				break;
+			}
+		}
+
+		if (!record) {
+			NV_ERROR(dev, "Register 0x%08x not found in PLL "
+				 "limits table", limit_match);
+			return -ENOENT;
+		}
+
+		pll_lim->vco1.minfreq = ROM16(record[0]) * 1000;
+		pll_lim->vco1.maxfreq = ROM16(record[2]) * 1000;
+		pll_lim->vco1.min_inputfreq = ROM16(record[4]) * 1000;
+		pll_lim->vco1.max_inputfreq = ROM16(record[6]) * 1000;
+		pll_lim->vco1.min_m = record[8];
+		pll_lim->vco1.max_m = record[9];
+		pll_lim->vco1.min_n = record[10];
+		pll_lim->vco1.max_n = record[11];
+		pll_lim->min_p = record[12];
+		pll_lim->max_p = record[13];
+		/* where did the refclk field go? */
+		if (limit_match == 0x00614100 || limit_match == 0x00614900)
+			pll_lim->refclk = 27000;
+		else
+			pll_lim->refclk = 100000;
+	}
+
+	/*
+	 * By now any valid limit table ought to have set a max frequency for
+	 * vco1, so if it's zero it's either a pre-limit-table BIOS, or one
+	 * with an empty limit table (seen on nv18).
+	 */
+	if (!pll_lim->vco1.maxfreq) {
+		pll_lim->vco1.minfreq = bios->fminvco;
+		pll_lim->vco1.maxfreq = bios->fmaxvco;
+		pll_lim->vco1.min_inputfreq = 0;
+		pll_lim->vco1.max_inputfreq = INT_MAX;
+		pll_lim->vco1.min_n = 0x1;
+		pll_lim->vco1.max_n = 0xff;
+		pll_lim->vco1.min_m = 0x1;
+		if (crystal_straps == 0) {
+			/* nv05 does this, nv11 doesn't, nv10 unknown */
+			if (cv < 0x11)
+				pll_lim->vco1.min_m = 0x7;
+			pll_lim->vco1.max_m = 0xd;
+		} else {
+			if (cv < 0x11)
+				pll_lim->vco1.min_m = 0x8;
+			pll_lim->vco1.max_m = 0xe;
+		}
+		if (cv < 0x17 || cv == 0x1a || cv == 0x20)
+			pll_lim->max_log2p = 4;
+		else
+			pll_lim->max_log2p = 5;
+		pll_lim->max_usable_log2p = pll_lim->max_log2p;
+	}
+
+	if (!pll_lim->refclk)
+		switch (crystal_straps) {
+		case 0:
+			pll_lim->refclk = 13500;
+			break;
+		case (1 << 6):
+			pll_lim->refclk = 14318;
+			break;
+		case (1 << 22):
+			pll_lim->refclk = 27000;
+			break;
+		case (1 << 22 | 1 << 6):
+			pll_lim->refclk = 25000;
+			break;
+		}
+
+#if 0 /* for easy debugging */
+	ErrorF("pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq);
+	ErrorF("pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq);
+	ErrorF("pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq);
+	ErrorF("pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq);
+
+	ErrorF("pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq);
+	ErrorF("pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq);
+	ErrorF("pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq);
+	ErrorF("pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq);
+
+	ErrorF("pll.vco1.min_n: %d\n", pll_lim->vco1.min_n);
+	ErrorF("pll.vco1.max_n: %d\n", pll_lim->vco1.max_n);
+	ErrorF("pll.vco1.min_m: %d\n", pll_lim->vco1.min_m);
+	ErrorF("pll.vco1.max_m: %d\n", pll_lim->vco1.max_m);
+	ErrorF("pll.vco2.min_n: %d\n", pll_lim->vco2.min_n);
+	ErrorF("pll.vco2.max_n: %d\n", pll_lim->vco2.max_n);
+	ErrorF("pll.vco2.min_m: %d\n", pll_lim->vco2.min_m);
+	ErrorF("pll.vco2.max_m: %d\n", pll_lim->vco2.max_m);
+
+	ErrorF("pll.max_log2p: %d\n", pll_lim->max_log2p);
+	ErrorF("pll.log2p_bias: %d\n", pll_lim->log2p_bias);
+
+	ErrorF("pll.refclk: %d\n", pll_lim->refclk);
+#endif
+
+	return 0;
+}
+
+static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint16_t offset)
+{
+	/*
+	 * offset + 0  (8 bits): Micro version
+	 * offset + 1  (8 bits): Minor version
+	 * offset + 2  (8 bits): Chip version
+	 * offset + 3  (8 bits): Major version
+	 */
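+	/*
+	 * Worked example (hypothetical bytes): 12 34 17 60 at 'offset'
+	 * decodes as major 0x60, chip 0x17, minor 0x34, micro 0x12,
+	 * printed below as "60.17.34.12".
+	 */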
+
+	bios->major_version = bios->data[offset + 3];
+	bios->pub.chip_version = bios->data[offset + 2];
+	NV_TRACE(dev, "Bios version %02x.%02x.%02x.%02x\n",
+		 bios->data[offset + 3], bios->data[offset + 2],
+		 bios->data[offset + 1], bios->data[offset]);
+}
+
+static void parse_script_table_pointers(struct nvbios *bios, uint16_t offset)
+{
+	/*
+	 * Parses the init table segment for pointers used in script execution.
+	 *
+	 * offset + 0  (16 bits): init script tables pointer
+	 * offset + 2  (16 bits): macro index table pointer
+	 * offset + 4  (16 bits): macro table pointer
+	 * offset + 6  (16 bits): condition table pointer
+	 * offset + 8  (16 bits): io condition table pointer
+	 * offset + 10 (16 bits): io flag condition table pointer
+	 * offset + 12 (16 bits): init function table pointer
+	 */
+
+	bios->init_script_tbls_ptr = ROM16(bios->data[offset]);
+	bios->macro_index_tbl_ptr = ROM16(bios->data[offset + 2]);
+	bios->macro_tbl_ptr = ROM16(bios->data[offset + 4]);
+	bios->condition_tbl_ptr = ROM16(bios->data[offset + 6]);
+	bios->io_condition_tbl_ptr = ROM16(bios->data[offset + 8]);
+	bios->io_flag_condition_tbl_ptr = ROM16(bios->data[offset + 10]);
+	bios->init_function_tbl_ptr = ROM16(bios->data[offset + 12]);
+}
+
+static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+	/*
+	 * Parses the load detect values for g80 cards.
+	 *
+	 * offset + 0 (16 bits): loadval table pointer
+	 */
+
+	uint16_t load_table_ptr;
+	uint8_t version, headerlen, entrylen, num_entries;
+
+	if (bitentry->length != 3) {
+		NV_ERROR(dev, "Do not understand BIT A table\n");
+		return -EINVAL;
+	}
+
+	load_table_ptr = ROM16(bios->data[bitentry->offset]);
+
+	if (load_table_ptr == 0x0) {
+		NV_ERROR(dev, "Pointer to BIT loadval table invalid\n");
+		return -EINVAL;
+	}
+
+	version = bios->data[load_table_ptr];
+
+	if (version != 0x10) {
+		NV_ERROR(dev, "BIT loadval table version %d.%d not supported\n",
+			 version >> 4, version & 0xF);
+		return -ENOSYS;
+	}
+
+	headerlen = bios->data[load_table_ptr + 1];
+	entrylen = bios->data[load_table_ptr + 2];
+	num_entries = bios->data[load_table_ptr + 3];
+
+	if (headerlen != 4 || entrylen != 4 || num_entries != 2) {
+		NV_ERROR(dev, "Do not understand BIT loadval table\n");
+		return -EINVAL;
+	}
+
+	/* First entry is normal dac, 2nd tv-out perhaps? */
+	bios->pub.dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff;
+
+	return 0;
+}
+
+static int parse_bit_C_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+	/*
+	 * offset + 8  (16 bits): PLL limits table pointer
+	 *
+	 * There's more in here, but that's unknown.
+	 */
+
+	if (bitentry->length < 10) {
+		NV_ERROR(dev, "Do not understand BIT C table\n");
+		return -EINVAL;
+	}
+
+	bios->pll_limit_tbl_ptr = ROM16(bios->data[bitentry->offset + 8]);
+
+	return 0;
+}
+
+static int parse_bit_display_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+	/*
+	 * Parses the flat panel table segment that the bit entry points to.
+	 * Starting at bitentry->offset:
+	 *
+	 * offset + 0  (16 bits): ??? table pointer - seems to have 18 byte
+	 * records beginning with a freq.
+	 * offset + 2  (16 bits): mode table pointer
+	 */
+
+	if (bitentry->length != 4) {
+		NV_ERROR(dev, "Do not understand BIT display table\n");
+		return -EINVAL;
+	}
+
+	bios->fp.fptablepointer = ROM16(bios->data[bitentry->offset + 2]);
+
+	return 0;
+}
+
+static int parse_bit_init_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+	/*
+	 * Parses the init table segment that the bit entry points to.
+	 *
+	 * See parse_script_table_pointers for layout
+	 */
+
+	if (bitentry->length < 14) {
+		NV_ERROR(dev, "Do not understand init table\n");
+		return -EINVAL;
+	}
+
+	parse_script_table_pointers(bios, bitentry->offset);
+
+	if (bitentry->length >= 16)
+		bios->some_script_ptr = ROM16(bios->data[bitentry->offset + 14]);
+	if (bitentry->length >= 18)
+		bios->init96_tbl_ptr = ROM16(bios->data[bitentry->offset + 16]);
+
+	return 0;
+}
+
+static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+	/*
+	 * BIT 'i' (info?) table
+	 *
+	 * offset + 0  (32 bits): BIOS version dword (as in B table)
+	 * offset + 5  (8  bits): BIOS feature byte (same as for BMP?)
+	 * offset + 13 (16 bits): pointer to table containing DAC load
+	 * detection comparison values
+	 *
+	 * There's other things in the table, purpose unknown
+	 */
+
+	uint16_t daccmpoffset;
+	uint8_t dacver, dacheaderlen;
+
+	if (bitentry->length < 6) {
+		NV_ERROR(dev, "BIT i table too short for needed information\n");
+		return -EINVAL;
+	}
+
+	parse_bios_version(dev, bios, bitentry->offset);
+
+	/*
+	 * bit 4 seems to indicate a mobile bios (doesn't suffer from BMP's
+	 * Quadro identity crisis), other bits possibly as for BMP feature byte
+	 */
+	bios->feature_byte = bios->data[bitentry->offset + 5];
+	bios->is_mobile = bios->feature_byte & FEATURE_MOBILE;
+
+	if (bitentry->length < 15) {
+		NV_WARN(dev, "BIT i table not long enough for DAC load "
+			       "detection comparison table\n");
+		return -EINVAL;
+	}
+
+	daccmpoffset = ROM16(bios->data[bitentry->offset + 13]);
+
+	/* doesn't exist on g80 */
+	if (!daccmpoffset)
+		return 0;
+
+	/*
+	 * The first value in the table, following the header, is the
+	 * comparison value, the second entry is a comparison value for
+	 * TV load detection.
+	 */
+
+	dacver = bios->data[daccmpoffset];
+	dacheaderlen = bios->data[daccmpoffset + 1];
+
+	if (dacver != 0x00 && dacver != 0x10) {
+		NV_WARN(dev, "DAC load detection comparison table version "
+			       "%d.%d not known\n", dacver >> 4, dacver & 0xf);
+		return -ENOSYS;
+	}
+
+	bios->pub.dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]);
+	bios->pub.tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]);
+
+	return 0;
+}
+
+static int parse_bit_lvds_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+	/*
+	 * Parses the LVDS table segment that the bit entry points to.
+	 * Starting at bitentry->offset:
+	 *
+	 * offset + 0  (16 bits): LVDS strap xlate table pointer
+	 */
+
+	if (bitentry->length != 2) {
+		NV_ERROR(dev, "Do not understand BIT LVDS table\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * No idea if it's still called the LVDS manufacturer table, but
+	 * the concept's close enough.
+	 */
+	bios->fp.lvdsmanufacturerpointer = ROM16(bios->data[bitentry->offset]);
+
+	return 0;
+}
+
+static int
+parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios,
+		      struct bit_entry *bitentry)
+{
+	/*
+	 * offset + 2  (8  bits): number of options in an
+	 * 	INIT_RAM_RESTRICT_ZM_REG_GROUP opcode option set
+	 * offset + 3  (16 bits): pointer to strap xlate table for RAM
+	 * 	restrict option selection
+	 *
+	 * There's a bunch of bits in this table other than the RAM restrict
+	 * stuff that we don't use - their purpose is currently unknown.
+	 */
+
+	uint16_t rr_strap_xlat;
+	uint8_t rr_group_count;
+	int i;
+
+	/*
+	 * Older bios versions don't have a sufficiently long table for
+	 * what we want
+	 */
+	if (bitentry->length < 0x5)
+		return 0;
+
+	if (bitentry->id[1] < 2) {
+		rr_group_count = bios->data[bitentry->offset + 2];
+		rr_strap_xlat = ROM16(bios->data[bitentry->offset + 3]);
+	} else {
+		rr_group_count = bios->data[bitentry->offset + 0];
+		rr_strap_xlat = ROM16(bios->data[bitentry->offset + 1]);
+	}
+
+	/* adjust length of INIT_87 */
+	for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != 0x87); i++);
+	itbl_entry[i].length += rr_group_count * 4;
+
+	/* set up multiplier for INIT_RAM_RESTRICT_ZM_REG_GROUP */
+	for (; itbl_entry[i].name && (itbl_entry[i].id != 0x8f); i++);
+	itbl_entry[i].length_multiplier = rr_group_count * 4;
+
+	init_ram_restrict_zm_reg_group_blocklen = itbl_entry[i].length_multiplier;
+	bios->ram_restrict_tbl_ptr = rr_strap_xlat;
+
+	return 0;
+}
+
+static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+	/*
+	 * Parses the pointer to the TMDS table
+	 *
+	 * Starting at bitentry->offset:
+	 *
+	 * offset + 0  (16 bits): TMDS table pointer
+	 *
+	 * The TMDS table is typically found just before the DCB table, with a
+	 * characteristic signature of 0x11,0x13 (1.1 being version, 0x13 being
+	 * length?)
+	 *
+	 * At offset +7 is a pointer to a script, which I don't know how to
+	 * run yet.
+	 * At offset +9 is a pointer to another script, likewise
+	 * Offset +11 has a pointer to a table where the first word is a pxclk
+	 * frequency and the second word a pointer to a script, which should be
+	 * run if the comparison pxclk frequency is less than the pxclk desired.
+	 * This repeats for decreasing comparison frequencies
+	 * Offset +13 has a pointer to a similar table
+	 * The selection of table (and possibly +7/+9 script) is dictated by
+	 * "or" from the DCB.
+	 */
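+	/*
+	 * Illustrative (hypothetical) comparison table: the entries
+	 * { 165000, script_a }, { 0, script_b } would select script_a
+	 * for any desired pxclk above 165 MHz and script_b otherwise.
+	 */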
+
+	uint16_t tmdstableptr, script1, script2;
+
+	if (bitentry->length != 2) {
+		NV_ERROR(dev, "Do not understand BIT TMDS table\n");
+		return -EINVAL;
+	}
+
+	tmdstableptr = ROM16(bios->data[bitentry->offset]);
+
+	if (tmdstableptr == 0x0) {
+		NV_ERROR(dev, "Pointer to TMDS table invalid\n");
+		return -EINVAL;
+	}
+
+	/* nv50+ has v2.0, but we don't parse it atm */
+	if (bios->data[tmdstableptr] != 0x11) {
+		NV_WARN(dev,
+			"TMDS table revision %d.%d not currently supported\n",
+			bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
+		return -ENOSYS;
+	}
+
+	/*
+	 * These two scripts are odd: they don't seem to get run even when
+	 * they are not stubbed.
+	 */
+	script1 = ROM16(bios->data[tmdstableptr + 7]);
+	script2 = ROM16(bios->data[tmdstableptr + 9]);
+	if (bios->data[script1] != 'q' || bios->data[script2] != 'q')
+		NV_WARN(dev, "TMDS table script pointers not stubbed\n");
+
+	bios->tmds.output0_script_ptr = ROM16(bios->data[tmdstableptr + 11]);
+	bios->tmds.output1_script_ptr = ROM16(bios->data[tmdstableptr + 13]);
+
+	return 0;
+}
+
+static int
+parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
+		      struct bit_entry *bitentry)
+{
+	/*
+	 * Parses the pointer to the G80 output script tables
+	 *
+	 * Starting at bitentry->offset:
+	 *
+	 * offset + 0  (16 bits): output script table pointer
+	 */
+
+	uint16_t outputscripttableptr;
+
+	if (bitentry->length != 3) {
+		NV_ERROR(dev, "Do not understand BIT U table\n");
+		return -EINVAL;
+	}
+
+	outputscripttableptr = ROM16(bios->data[bitentry->offset]);
+	bios->display.script_table_ptr = outputscripttableptr;
+	return 0;
+}
+
+static int
+parse_bit_displayport_tbl_entry(struct drm_device *dev, struct nvbios *bios,
+				struct bit_entry *bitentry)
+{
+	bios->display.dp_table_ptr = ROM16(bios->data[bitentry->offset]);
+	return 0;
+}
+
+struct bit_table {
+	const char id;
+	int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
+};
+
+#define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
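+/* e.g. BIT_TABLE('i', i) yields { 'i', parse_bit_i_tbl_entry } */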
+
+static int
+parse_bit_table(struct nvbios *bios, const uint16_t bitoffset,
+		struct bit_table *table)
+{
+	struct drm_device *dev = bios->dev;
+	uint8_t maxentries = bios->data[bitoffset + 4];
+	int i, offset;
+	struct bit_entry bitentry;
+
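+	/*
+	 * Each BIT entry is 6 bytes: +0 is the id character, +1 an id
+	 * version, +2 (16 bits) the length of the table pointed to and
+	 * +4 (16 bits) the pointer itself; the first entry starts 6
+	 * bytes into the BIT structure.
+	 */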
+	for (i = 0, offset = bitoffset + 6; i < maxentries; i++, offset += 6) {
+		bitentry.id[0] = bios->data[offset];
+
+		if (bitentry.id[0] != table->id)
+			continue;
+
+		bitentry.id[1] = bios->data[offset + 1];
+		bitentry.length = ROM16(bios->data[offset + 2]);
+		bitentry.offset = ROM16(bios->data[offset + 4]);
+
+		return table->parse_fn(dev, bios, &bitentry);
+	}
+
+	NV_INFO(dev, "BIT table '%c' not found\n", table->id);
+	return -ENOSYS;
+}
+
+static int
+parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
+{
+	int ret;
+
+	/*
+	 * The only restriction on parsing order currently is having 'i' first
+	 * for use of bios->*_version or bios->feature_byte while parsing;
+	 * functions shouldn't actually be *doing* anything apart from pulling
+	 * data from the image into the bios struct, so there are no
+	 * interdependencies.
+	 */
+	ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('i', i));
+	if (ret) /* info? */
+		return ret;
+	if (bios->major_version >= 0x60) /* g80+ */
+		parse_bit_table(bios, bitoffset, &BIT_TABLE('A', A));
+	ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('C', C));
+	if (ret)
+		return ret;
+	parse_bit_table(bios, bitoffset, &BIT_TABLE('D', display));
+	ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('I', init));
+	if (ret)
+		return ret;
+	parse_bit_table(bios, bitoffset, &BIT_TABLE('M', M)); /* memory? */
+	parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds));
+	parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds));
+	parse_bit_table(bios, bitoffset, &BIT_TABLE('U', U));
+	parse_bit_table(bios, bitoffset, &BIT_TABLE('d', displayport));
+
+	return 0;
+}
+
+static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsigned int offset)
+{
+	/*
+	 * Parses the BMP structure for useful things, but does not act on them
+	 *
+	 * offset +   5: BMP major version
+	 * offset +   6: BMP minor version
+	 * offset +   9: BMP feature byte
+	 * offset +  10: BCD encoded BIOS version
+	 *
+	 * offset +  18: init script table pointer (for bios versions < 5.10h)
+	 * offset +  20: extra init script table pointer (for bios
+	 * versions < 5.10h)
+	 *
+	 * offset +  24: memory init table pointer (used on early bios versions)
+	 * offset +  26: SDR memory sequencing setup data table
+	 * offset +  28: DDR memory sequencing setup data table
+	 *
+	 * offset +  54: index of I2C CRTC pair to use for CRT output
+	 * offset +  55: index of I2C CRTC pair to use for TV output
+	 * offset +  56: index of I2C CRTC pair to use for flat panel output
+	 * offset +  58: write CRTC index for I2C pair 0
+	 * offset +  59: read CRTC index for I2C pair 0
+	 * offset +  60: write CRTC index for I2C pair 1
+	 * offset +  61: read CRTC index for I2C pair 1
+	 *
+	 * offset +  67: maximum internal PLL frequency (single stage PLL)
+	 * offset +  71: minimum internal PLL frequency (single stage PLL)
+	 *
+	 * offset +  75: script table pointers, as described in
+	 * parse_script_table_pointers
+	 *
+	 * offset +  89: TMDS single link output A table pointer
+	 * offset +  91: TMDS single link output B table pointer
+	 * offset +  95: LVDS single link output A table pointer
+	 * offset + 105: flat panel timings table pointer
+	 * offset + 107: flat panel strapping translation table pointer
+	 * offset + 117: LVDS manufacturer panel config table pointer
+	 * offset + 119: LVDS manufacturer strapping translation table pointer
+	 *
+	 * offset + 142: PLL limits table pointer
+	 *
+	 * offset + 156: minimum pixel clock for LVDS dual link
+	 */
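+	/*
+	 * An illustrative consequence of the layout above: BMP 5.06 is
+	 * taken as 75 bytes long below, so it exposes the PLL frequency
+	 * fields at +67/+71 but none of the table pointers from +75 on.
+	 */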
+
+	uint8_t *bmp = &bios->data[offset], bmp_version_major, bmp_version_minor;
+	uint16_t bmplength;
+	uint16_t legacy_scripts_offset, legacy_i2c_offset;
+
+	/* load needed defaults in case we can't parse this info */
+	bios->bdcb.dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
+	bios->bdcb.dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
+	bios->bdcb.dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
+	bios->bdcb.dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
+	bios->pub.digital_min_front_porch = 0x4b;
+	bios->fmaxvco = 256000;
+	bios->fminvco = 128000;
+	bios->fp.duallink_transition_clk = 90000;
+
+	bmp_version_major = bmp[5];
+	bmp_version_minor = bmp[6];
+
+	NV_TRACE(dev, "BMP version %d.%d\n",
+		 bmp_version_major, bmp_version_minor);
+
+	/*
+	 * Make sure that 0x36 is blank and can't be mistaken for a DCB
+	 * pointer on early versions
+	 */
+	if (bmp_version_major < 5)
+		*(uint16_t *)&bios->data[0x36] = 0;
+
+	/*
+	 * Seems that the minor version was 1 for all major versions prior
+	 * to 5. Version 6 could theoretically exist, but I suspect BIT
+	 * happened instead.
+	 */
+	if ((bmp_version_major < 5 && bmp_version_minor != 1) || bmp_version_major > 5) {
+		NV_ERROR(dev, "You have an unsupported BMP version. "
+				"Please send in your bios\n");
+		return -ENOSYS;
+	}
+
+	if (bmp_version_major == 0)
+		/* nothing that's currently useful in this version */
+		return 0;
+	else if (bmp_version_major == 1)
+		bmplength = 44; /* exact for 1.01 */
+	else if (bmp_version_major == 2)
+		bmplength = 48; /* exact for 2.01 */
+	else if (bmp_version_major == 3)
+		bmplength = 54;
+		/* guessed - mem init tables added in this version */
+	else if (bmp_version_major == 4 || bmp_version_minor < 0x1)
+		/* don't know if 5.0 exists... */
+		bmplength = 62;
+		/* guessed - BMP I2C indices added in version 4 */
+	else if (bmp_version_minor < 0x6)
+		bmplength = 67; /* exact for 5.01 */
+	else if (bmp_version_minor < 0x10)
+		bmplength = 75; /* exact for 5.06 */
+	else if (bmp_version_minor == 0x10)
+		bmplength = 89; /* exact for 5.10h */
+	else if (bmp_version_minor < 0x14)
+		bmplength = 118; /* exact for 5.11h */
+	else if (bmp_version_minor < 0x24)
+		/*
+		 * Not sure of the version where pll limits came in; they
+		 * certainly exist by 0x24 though.
+		 */
+		/* length not exact: this is long enough to get lvds members */
+		bmplength = 123;
+	else if (bmp_version_minor < 0x27)
+		/*
+		 * Length not exact: this is long enough to get pll limit
+		 * member
+		 */
+		bmplength = 144;
+	else
+		/*
+		 * Length not exact: this is long enough to get dual link
+		 * transition clock.
+		 */
+		bmplength = 158;
+
+	/* checksum */
+	if (nv_cksum(bmp, 8)) {
+		NV_ERROR(dev, "Bad BMP checksum\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Bit 4 seems to indicate either a mobile bios or a quadro card --
+	 * mobile behaviour is consistent (nv11+), quadro only seen on
+	 * nv18gl-nv36gl (not nv10gl); bit 5 indicates that the flat panel
+	 * tables are present, and bit 6 a tv bios.
+	 */
+	bios->feature_byte = bmp[9];
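+	/* e.g. feature_byte 0x70 sets bits 4-6: mobile/quadro, fp and tv */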
+
+	parse_bios_version(dev, bios, offset + 10);
+
+	if (bmp_version_major < 5 || bmp_version_minor < 0x10)
+		bios->old_style_init = true;
+	legacy_scripts_offset = 18;
+	if (bmp_version_major < 2)
+		legacy_scripts_offset -= 4;
+	bios->init_script_tbls_ptr = ROM16(bmp[legacy_scripts_offset]);
+	bios->extra_init_script_tbl_ptr = ROM16(bmp[legacy_scripts_offset + 2]);
+
+	if (bmp_version_major > 2) {	/* appears in BMP 3 */
+		bios->legacy.mem_init_tbl_ptr = ROM16(bmp[24]);
+		bios->legacy.sdr_seq_tbl_ptr = ROM16(bmp[26]);
+		bios->legacy.ddr_seq_tbl_ptr = ROM16(bmp[28]);
+	}
+
+	legacy_i2c_offset = 0x48;	/* BMP version 2 & 3 */
+	if (bmplength > 61)
+		legacy_i2c_offset = offset + 54;
+	bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
+	bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
+	bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];
+	bios->bdcb.dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
+	bios->bdcb.dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
+	bios->bdcb.dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
+	bios->bdcb.dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
+
+	if (bmplength > 74) {
+		bios->fmaxvco = ROM32(bmp[67]);
+		bios->fminvco = ROM32(bmp[71]);
+	}
+	if (bmplength > 88)
+		parse_script_table_pointers(bios, offset + 75);
+	if (bmplength > 94) {
+		bios->tmds.output0_script_ptr = ROM16(bmp[89]);
+		bios->tmds.output1_script_ptr = ROM16(bmp[91]);
+		/*
+		 * Never observed in use with lvds scripts, but reused as the
+		 * 18/24 bit panel interface default for EDID-equipped panels
+		 * (if_is_24bit is not set directly to avoid any oscillation).
+		 */
+		bios->legacy.lvds_single_a_script_ptr = ROM16(bmp[95]);
+	}
+	if (bmplength > 108) {
+		bios->fp.fptablepointer = ROM16(bmp[105]);
+		bios->fp.fpxlatetableptr = ROM16(bmp[107]);
+		bios->fp.xlatwidth = 1;
+	}
+	if (bmplength > 120) {
+		bios->fp.lvdsmanufacturerpointer = ROM16(bmp[117]);
+		bios->fp.fpxlatemanufacturertableptr = ROM16(bmp[119]);
+	}
+	if (bmplength > 143)
+		bios->pll_limit_tbl_ptr = ROM16(bmp[142]);
+
+	if (bmplength > 157)
+		bios->fp.duallink_transition_clk = ROM16(bmp[156]) * 10;
+
+	return 0;
+}
+
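+/*
+ * Returns the offset of the first occurrence of str within the first n
+ * bytes of data, or 0 if not found; note that a match at offset 0 would
+ * be indistinguishable from failure.
+ */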
+static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
+{
+	int i, j;
+
+	for (i = 0; i <= (n - len); i++) {
+		for (j = 0; j < len; j++)
+			if (data[i + j] != str[j])
+				break;
+		if (j == len)
+			return i;
+	}
+
+	return 0;
+}
+
+static int
+read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
+{
+	uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
+	int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
+	int recordoffset = 0, rdofs = 1, wrofs = 0;
+	uint8_t port_type = 0;
+
+	if (!i2ctable)
+		return -EINVAL;
+
+	if (dcb_version >= 0x30) {
+		if (i2ctable[0] != dcb_version) /* necessary? */
+			NV_WARN(dev,
+				"DCB I2C table version mismatch (%02X vs %02X)\n",
+				i2ctable[0], dcb_version);
+		dcb_i2c_ver = i2ctable[0];
+		headerlen = i2ctable[1];
+		if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
+			i2c_entries = i2ctable[2];
+		else
+			NV_WARN(dev,
+				"DCB I2C table has more entries than indexable "
+				"(%d entries, max index 15)\n", i2ctable[2]);
+		entry_len = i2ctable[3];
+		/* [4] is i2c_default_indices, read in parse_dcb_table() */
+	}
+	/*
+	 * It's your own fault if you call this function on a DCB 1.1 BIOS --
+	 * the test below is for DCB 1.2
+	 */
+	if (dcb_version < 0x14) {
+		recordoffset = 2;
+		rdofs = 0;
+		wrofs = 1;
+	}
+
+	if (index == 0xf)
+		return 0;
+	if (index >= i2c_entries) {
+		NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
+			 index, i2ctable[2]);
+		return -ENOENT;
+	}
+	if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
+		NV_ERROR(dev, "DCB I2C entry invalid\n");
+		return -EINVAL;
+	}
+
+	if (dcb_i2c_ver >= 0x30) {
+		port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
+
+		/*
+		 * Fixup for chips using same address offset for read and
+		 * write.
+		 */
+		if (port_type == 4)	/* seen on C51 */
+			rdofs = wrofs = 1;
+		if (port_type >= 5)	/* G80+ */
+			rdofs = wrofs = 0;
+	}
+
+	if (dcb_i2c_ver >= 0x40 && port_type != 5 && port_type != 6)
+		NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
+
+	i2c->port_type = port_type;
+	i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
+	i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
+
+	return 0;
+}
+
+static struct dcb_gpio_entry *
+new_gpio_entry(struct nvbios *bios)
+{
+	struct parsed_dcb_gpio *gpio = &bios->bdcb.gpio;
+
+	return &gpio->entry[gpio->entries++];
+}
+
+struct dcb_gpio_entry *
+nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->VBIOS;
+	int i;
+
+	for (i = 0; i < bios->bdcb.gpio.entries; i++) {
+		if (bios->bdcb.gpio.entry[i].tag != tag)
+			continue;
+
+		return &bios->bdcb.gpio.entry[i];
+	}
+
+	return NULL;
+}
+
+static void
+parse_dcb30_gpio_entry(struct nvbios *bios, uint16_t offset)
+{
+	struct dcb_gpio_entry *gpio;
+	uint16_t ent = ROM16(bios->data[offset]);
+	uint8_t line = ent & 0x1f,
+		tag = ent >> 5 & 0x3f,
+		flags = ent >> 11 & 0x1f;
+
+	if (tag == 0x3f)
+		return;
+
+	gpio = new_gpio_entry(bios);
+
+	gpio->tag = tag;
+	gpio->line = line;
+	gpio->invert = flags != 4;
+}
+
+static void
+parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset)
+{
+	struct dcb_gpio_entry *gpio;
+	uint32_t ent = ROM32(bios->data[offset]);
+	uint8_t line = ent & 0x1f,
+		tag = ent >> 8 & 0xff;
+
+	if (tag == 0xff)
+		return;
+
+	gpio = new_gpio_entry(bios);
+
+	/*
+	 * Currently unused; we may need more fields parsed at some point.
+	 */
+	gpio->tag = tag;
+	gpio->line = line;
+}
+
+static void
+parse_dcb_gpio_table(struct nvbios *bios)
+{
+	struct drm_device *dev = bios->dev;
+	uint16_t gpio_table_ptr = bios->bdcb.gpio_table_ptr;
+	uint8_t *gpio_table = &bios->data[gpio_table_ptr];
+	int header_len = gpio_table[1],
+	    entries = gpio_table[2],
+	    entry_len = gpio_table[3];
+	void (*parse_entry)(struct nvbios *, uint16_t) = NULL;
+	int i;
+
+	if (bios->bdcb.version >= 0x40) {
+		if (gpio_table_ptr && entry_len != 4) {
+			NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
+			return;
+		}
+
+		parse_entry = parse_dcb40_gpio_entry;
+
+	} else if (bios->bdcb.version >= 0x30) {
+		if (gpio_table_ptr && entry_len != 2) {
+			NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
+			return;
+		}
+
+		parse_entry = parse_dcb30_gpio_entry;
+
+	} else if (bios->bdcb.version >= 0x22) {
+		/*
+		 * DCBs older than v3.0 don't really have a GPIO
+		 * table; instead, they keep some GPIO info at fixed
+		 * locations.
+		 */
+		uint16_t dcbptr = ROM16(bios->data[0x36]);
+		uint8_t *tvdac_gpio = &bios->data[dcbptr - 5];
+
+		if (tvdac_gpio[0] & 1) {
+			struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
+
+			gpio->tag = DCB_GPIO_TVDAC0;
+			gpio->line = tvdac_gpio[1] >> 4;
+			gpio->invert = tvdac_gpio[0] & 2;
+		}
+	}
+
+	if (!gpio_table_ptr)
+		return;
+
+	if (entries > DCB_MAX_NUM_GPIO_ENTRIES) {
+		NV_WARN(dev, "Too many entries in the DCB GPIO table.\n");
+		entries = DCB_MAX_NUM_GPIO_ENTRIES;
+	}
+
+	for (i = 0; i < entries; i++)
+		parse_entry(bios, gpio_table_ptr + header_len + entry_len * i);
+}
+
+struct dcb_connector_table_entry *
+nouveau_bios_connector_entry(struct drm_device *dev, int index)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->VBIOS;
+	struct dcb_connector_table_entry *cte;
+
+	if (index >= bios->bdcb.connector.entries)
+		return NULL;
+
+	cte = &bios->bdcb.connector.entry[index];
+	if (cte->type == 0xff)
+		return NULL;
+
+	return cte;
+}
+
+static void
+parse_dcb_connector_table(struct nvbios *bios)
+{
+	struct drm_device *dev = bios->dev;
+	struct dcb_connector_table *ct = &bios->bdcb.connector;
+	struct dcb_connector_table_entry *cte;
+	uint8_t *conntab = &bios->data[bios->bdcb.connector_table_ptr];
+	uint8_t *entry;
+	int i;
+
+	if (!bios->bdcb.connector_table_ptr) {
+		NV_DEBUG(dev, "No DCB connector table present\n");
+		return;
+	}
+
+	NV_INFO(dev, "DCB connector table: VHER 0x%02x %d %d %d\n",
+		conntab[0], conntab[1], conntab[2], conntab[3]);
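+	/* conntab[0..3]: version, header length, entry count, entry length */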
+	if ((conntab[0] != 0x30 && conntab[0] != 0x40) ||
+	    (conntab[3] != 2 && conntab[3] != 4)) {
+		NV_ERROR(dev, "  Unknown!  Please report.\n");
+		return;
+	}
+
+	ct->entries = conntab[2];
+
+	entry = conntab + conntab[1];
+	cte = &ct->entry[0];
+	for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) {
+		if (conntab[3] == 2)
+			cte->entry = ROM16(entry[0]);
+		else
+			cte->entry = ROM32(entry[0]);
+		cte->type  = (cte->entry & 0x000000ff) >> 0;
+		cte->index = (cte->entry & 0x00000f00) >> 8;
+		switch (cte->entry & 0x00033000) {
+		case 0x00001000:
+			cte->gpio_tag = 0x07;
+			break;
+		case 0x00002000:
+			cte->gpio_tag = 0x08;
+			break;
+		case 0x00010000:
+			cte->gpio_tag = 0x51;
+			break;
+		case 0x00020000:
+			cte->gpio_tag = 0x52;
+			break;
+		default:
+			cte->gpio_tag = 0xff;
+			break;
+		}
+
+		if (cte->type == 0xff)
+			continue;
+
+		NV_INFO(dev, "  %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
+			i, cte->entry, cte->type, cte->index, cte->gpio_tag);
+	}
+}
+
+static struct dcb_entry *new_dcb_entry(struct parsed_dcb *dcb)
+{
+	struct dcb_entry *entry = &dcb->entry[dcb->entries];
+
+	memset(entry, 0, sizeof(struct dcb_entry));
+	entry->index = dcb->entries++;
+
+	return entry;
+}
+
+static void fabricate_vga_output(struct parsed_dcb *dcb, int i2c, int heads)
+{
+	struct dcb_entry *entry = new_dcb_entry(dcb);
+
+	entry->type = 0;
+	entry->i2c_index = i2c;
+	entry->heads = heads;
+	entry->location = DCB_LOC_ON_CHIP;
+	/* "or" mostly unused in early gen crt modesetting, 0 is fine */
+}
+
+static void fabricate_dvi_i_output(struct parsed_dcb *dcb, bool twoHeads)
+{
+	struct dcb_entry *entry = new_dcb_entry(dcb);
+
+	entry->type = 2;
+	entry->i2c_index = LEGACY_I2C_PANEL;
+	entry->heads = twoHeads ? 3 : 1;
+	entry->location = !DCB_LOC_ON_CHIP;	/* ie OFF CHIP */
+	entry->or = 1;	/* means |0x10 gets set on CRE_LCD__INDEX */
+	entry->duallink_possible = false; /* SiI164 and co. are single link */
+
+#if 0
+	/*
+	 * For dvi-a either crtc probably works, but my card appears to only
+	 * support dvi-d.  "nvidia" still attempts to program it for dvi-a,
+	 * doing the full fp output setup (program 0x6808.. fp dimension regs,
+	 * setting 0x680848 to 0x10000111 to enable, maybe setting 0x680880);
+	 * the monitor picks up the mode res ok and lights up, but no pixel
+	 * data appears, so the board manufacturer probably connected up the
+	 * sync lines, but missed the video traces / components
+	 *
+	 * with this introduction, dvi-a left as an exercise for the reader.
+	 */
+	fabricate_vga_output(dcb, LEGACY_I2C_PANEL, entry->heads);
+#endif
+}
+
+static void fabricate_tv_output(struct parsed_dcb *dcb, bool twoHeads)
+{
+	struct dcb_entry *entry = new_dcb_entry(dcb);
+
+	entry->type = 1;
+	entry->i2c_index = LEGACY_I2C_TV;
+	entry->heads = twoHeads ? 3 : 1;
+	entry->location = !DCB_LOC_ON_CHIP;	/* ie OFF CHIP */
+}
+
+static bool
+parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
+		  uint32_t conn, uint32_t conf, struct dcb_entry *entry)
+{
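+	/*
+	 * conn (the DCB entry dword) packs: type in bits 0-3, i2c index
+	 * in bits 4-7, heads in 8-11, connector (DCB 4.0+) in 12-15, bus
+	 * in 16-19, location in 20-21 and or in 24-27, as unpacked below.
+	 */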
+	entry->type = conn & 0xf;
+	entry->i2c_index = (conn >> 4) & 0xf;
+	entry->heads = (conn >> 8) & 0xf;
+	if (bdcb->version >= 0x40)
+		entry->connector = (conn >> 12) & 0xf;
+	entry->bus = (conn >> 16) & 0xf;
+	entry->location = (conn >> 20) & 0x3;
+	entry->or = (conn >> 24) & 0xf;
+	/*
+	 * Normal entries consist of a single bit, but dual link has the
+	 * next most significant bit set too
+	 */
+	entry->duallink_possible =
+			((1 << (ffs(entry->or) - 1)) * 3 == entry->or);
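+	/* e.g. or == 3 or 6 allows dual link; or == 1, 2 or 4 does not */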
+
+	switch (entry->type) {
+	case OUTPUT_ANALOG:
+		/*
+		 * Although the rest of a CRT conf dword is usually
+		 * zeros, mac biosen have stuff there so we must mask
+		 */
+		entry->crtconf.maxfreq = (bdcb->version < 0x30) ?
+					 (conf & 0xffff) * 10 :
+					 (conf & 0xff) * 10000;
+		break;
+	case OUTPUT_LVDS:
+		{
+		uint32_t mask;
+		if (conf & 0x1)
+			entry->lvdsconf.use_straps_for_mode = true;
+		if (bdcb->version < 0x22) {
+			mask = ~0xd;
+			/*
+			 * The laptop in bug 14567 lies and claims to not use
+			 * straps when it does, so assume all DCB 2.0 laptops
+			 * use straps, until an EDID-using laptop broken by this turns up.
+			 */
+			entry->lvdsconf.use_straps_for_mode = true;
+			/*
+			 * Both 0x4 and 0x8 show up in v2.0 tables; assume they
+			 * mean the same thing (probably wrong, but might work)
+			 */
+			if (conf & 0x4 || conf & 0x8)
+				entry->lvdsconf.use_power_scripts = true;
+		} else {
+			mask = ~0x5;
+			if (conf & 0x4)
+				entry->lvdsconf.use_power_scripts = true;
+		}
+		if (conf & mask) {
+			/*
+			 * Until we even try to use these on G8x, it's
+			 * useless reporting unknown bits.  They all are.
+			 */
+			if (bdcb->version >= 0x40)
+				break;
+
+			NV_ERROR(dev, "Unknown LVDS configuration bits, "
+				      "please report\n");
+		}
+		break;
+		}
+	case OUTPUT_TV:
+	{
+		if (bdcb->version >= 0x30)
+			entry->tvconf.has_component_output = conf & (0x8 << 4);
+		else
+			entry->tvconf.has_component_output = false;
+
+		break;
+	}
+	case OUTPUT_DP:
+		entry->dpconf.sor.link = (conf & 0x00000030) >> 4;
+		entry->dpconf.link_bw = (conf & 0x00e00000) >> 21;
+		switch ((conf & 0x0f000000) >> 24) {
+		case 0xf:
+			entry->dpconf.link_nr = 4;
+			break;
+		case 0x3:
+			entry->dpconf.link_nr = 2;
+			break;
+		default:
+			entry->dpconf.link_nr = 1;
+			break;
+		}
+		break;
+	case OUTPUT_TMDS:
+		entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4;
+		break;
+	case 0xe:
+		/* weird g80 mobile type that "nv" treats as a terminator */
+		bdcb->dcb.entries--;
+		return false;
+	}
+
+	/* unsure what DCB version introduces this, 3.0? */
+	if (conf & 0x100000)
+		entry->i2c_upper_default = true;
+
+	return true;
+}
+
+static bool
+parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
+		  uint32_t conn, uint32_t conf, struct dcb_entry *entry)
+{
+	if (conn != 0xf0003f00 && conn != 0xf2247f10 && conn != 0xf2204001 &&
+	    conn != 0xf2204301 && conn != 0xf2204311 && conn != 0xf2208001 &&
+	    conn != 0xf2244001 && conn != 0xf2244301 && conn != 0xf2244311 &&
+	    conn != 0xf4204011 && conn != 0xf4208011 && conn != 0xf4248011 &&
+	    conn != 0xf2045ff2 && conn != 0xf2045f14 && conn != 0xf207df14 &&
+	    conn != 0xf2205004 && conn != 0xf2209004) {
+		NV_ERROR(dev, "Unknown DCB 1.5 entry, please report\n");
+
+		/* cause output setting to fail for !TV, so message is seen */
+		if ((conn & 0xf) != 0x1)
+			dcb->entries = 0;
+
+		return false;
+	}
+	/* most of the below is a "best guess" atm */
+	entry->type = conn & 0xf;
+	if (entry->type == 2)
+		/* another way of specifying straps based lvds... */
+		entry->type = OUTPUT_LVDS;
+	if (entry->type == 4) { /* digital */
+		if (conn & 0x10)
+			entry->type = OUTPUT_LVDS;
+		else
+			entry->type = OUTPUT_TMDS;
+	}
+	/* what's in bits 5-13? could be some encoder maker thing, in the tv case */
+	entry->i2c_index = (conn >> 14) & 0xf;
+	/* raw heads field is in range 0-1, so move to 1-2 */
+	entry->heads = ((conn >> 18) & 0x7) + 1;
+	entry->location = (conn >> 21) & 0xf;
+	/* unused: entry->bus = (conn >> 25) & 0x7; */
+	/* set or to be same as heads -- hopefully safe enough */
+	entry->or = entry->heads;
+	entry->duallink_possible = false;
+
+	switch (entry->type) {
+	case OUTPUT_ANALOG:
+		entry->crtconf.maxfreq = (conf & 0xffff) * 10;
+		break;
+	case OUTPUT_LVDS:
+		/*
+		 * This is probably buried in conn's unknown bits.
+		 * This will upset EDID-ful models, if they exist
+		 */
+		entry->lvdsconf.use_straps_for_mode = true;
+		entry->lvdsconf.use_power_scripts = true;
+		break;
+	case OUTPUT_TMDS:
+		/*
+		 * Invent a DVI-A output, by copying the fields of the DVI-D
+		 * output; reported to work by math_b on an NV20(!).
+		 */
+		fabricate_vga_output(dcb, entry->i2c_index, entry->heads);
+		break;
+	case OUTPUT_TV:
+		entry->tvconf.has_component_output = false;
+		break;
+	}
+
+	return true;
+}
+
+static bool parse_dcb_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
+			    uint32_t conn, uint32_t conf)
+{
+	struct dcb_entry *entry = new_dcb_entry(&bdcb->dcb);
+	bool ret;
+
+	if (bdcb->version >= 0x20)
+		ret = parse_dcb20_entry(dev, bdcb, conn, conf, entry);
+	else
+		ret = parse_dcb15_entry(dev, &bdcb->dcb, conn, conf, entry);
+	if (!ret)
+		return ret;
+
+	read_dcb_i2c_entry(dev, bdcb->version, bdcb->i2c_table,
+			   entry->i2c_index, &bdcb->dcb.i2c[entry->i2c_index]);
+
+	return true;
+}
+
+static
+void merge_like_dcb_entries(struct drm_device *dev, struct parsed_dcb *dcb)
+{
+	/*
+	 * DCB v2.0 lists each output combination separately.
+	 * Here we merge compatible entries, ending up with fewer outputs
+	 * that each carry more options.
+	 */
+
+	int i, newentries = 0;
+
+	for (i = 0; i < dcb->entries; i++) {
+		struct dcb_entry *ient = &dcb->entry[i];
+		int j;
+
+		for (j = i + 1; j < dcb->entries; j++) {
+			struct dcb_entry *jent = &dcb->entry[j];
+
+			if (jent->type == 100) /* already merged entry */
+				continue;
+
+			/* merge heads field when all other fields the same */
+			if (jent->i2c_index == ient->i2c_index &&
+			    jent->type == ient->type &&
+			    jent->location == ient->location &&
+			    jent->or == ient->or) {
+				NV_TRACE(dev, "Merging DCB entries %d and %d\n",
+					 i, j);
+				ient->heads |= jent->heads;
+				jent->type = 100; /* dummy value */
+			}
+		}
+	}
+
+	/* Compact entries merged into others out of dcb */
+	for (i = 0; i < dcb->entries; i++) {
+		if (dcb->entry[i].type == 100)
+			continue;
+
+		if (newentries != i) {
+			dcb->entry[newentries] = dcb->entry[i];
+			dcb->entry[newentries].index = newentries;
+		}
+		newentries++;
+	}
+
+	dcb->entries = newentries;
+}
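+
+/*
+ * Worked example of the merge above (illustrative): two v2.0 entries that
+ * differ only in heads -- say 0x1 and 0x2, with identical i2c_index, type,
+ * location and or -- collapse into one entry with heads 0x3, and the
+ * now-redundant entry is compacted away by the second loop.
+ */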
+
+static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
+{
+	struct bios_parsed_dcb *bdcb = &bios->bdcb;
+	struct parsed_dcb *dcb;
+	uint16_t dcbptr, i2ctabptr = 0;
+	uint8_t *dcbtable;
+	uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
+	bool configblock = true;
+	int recordlength = 8, confofs = 4;
+	int i;
+
+	dcb = bios->pub.dcb = &bdcb->dcb;
+	dcb->entries = 0;
+
+	/* get the offset from 0x36 */
+	dcbptr = ROM16(bios->data[0x36]);
+
+	if (dcbptr == 0x0) {
+		NV_WARN(dev, "No output data (DCB) found in BIOS, "
+			       "assuming a CRT output exists\n");
+		/* this situation likely means a really old card, pre DCB */
+		fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
+
+		if (nv04_tv_identify(dev,
+				     bios->legacy.i2c_indices.tv) >= 0)
+			fabricate_tv_output(dcb, twoHeads);
+
+		return 0;
+	}
+
+	dcbtable = &bios->data[dcbptr];
+
+	/* get DCB version */
+	bdcb->version = dcbtable[0];
+	NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n",
+		 bdcb->version >> 4, bdcb->version & 0xf);
+
+	if (bdcb->version >= 0x20) { /* NV17+ */
+		uint32_t sig;
+
+		if (bdcb->version >= 0x30) { /* NV40+ */
+			headerlen = dcbtable[1];
+			entries = dcbtable[2];
+			recordlength = dcbtable[3];
+			i2ctabptr = ROM16(dcbtable[4]);
+			sig = ROM32(dcbtable[6]);
+			bdcb->gpio_table_ptr = ROM16(dcbtable[10]);
+			bdcb->connector_table_ptr = ROM16(dcbtable[20]);
+		} else {
+			i2ctabptr = ROM16(dcbtable[2]);
+			sig = ROM32(dcbtable[4]);
+			headerlen = 8;
+		}
+
+		if (sig != 0x4edcbdcb) {
+			NV_ERROR(dev, "Bad Display Configuration Block "
+					"signature (%08X)\n", sig);
+			return -EINVAL;
+		}
+	} else if (bdcb->version >= 0x15) { /* some NV11 and NV20 */
+		char sig[8] = { 0 };
+
+		strncpy(sig, (char *)&dcbtable[-7], 7);
+		i2ctabptr = ROM16(dcbtable[2]);
+		recordlength = 10;
+		confofs = 6;
+
+		if (strcmp(sig, "DEV_REC")) {
+			NV_ERROR(dev, "Bad Display Configuration Block "
+					"signature (%s)\n", sig);
+			return -EINVAL;
+		}
+	} else {
+		/*
+	 * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but always
+	 * has the same single (crt) entry, even when tv-out is present,
+	 * so the conclusion is that this version cannot really be used.
+	 * v1.2 tables (some NV6/10, and NV15+) normally have the same
+	 * 5 entries, which are not specific to the card and so of no use.
+	 * v1.2 does have an I2C table that read_dcb_i2c_table can
+	 * handle, but cards exist (nv11 in #14821) with a bad i2c table
+	 * pointer, so use the indices parsed in parse_bmp_structure.
+	 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful.
+		 */
+		NV_TRACEWARN(dev, "No useful information in BIOS output table; "
+				  "adding all possible outputs\n");
+		fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
+
+		/*
+		 * Attempt to detect TV before DVI because the test
+		 * for the former is more accurate and it rules the
+		 * latter out.
+		 */
+		if (nv04_tv_identify(dev,
+				     bios->legacy.i2c_indices.tv) >= 0)
+			fabricate_tv_output(dcb, twoHeads);
+
+		else if (bios->tmds.output0_script_ptr ||
+			 bios->tmds.output1_script_ptr)
+			fabricate_dvi_i_output(dcb, twoHeads);
+
+		return 0;
+	}
+
+	if (!i2ctabptr)
+		NV_WARN(dev, "No pointer to DCB I2C port table\n");
+	else {
+		bdcb->i2c_table = &bios->data[i2ctabptr];
+		if (bdcb->version >= 0x30)
+			bdcb->i2c_default_indices = bdcb->i2c_table[4];
+	}
+
+	parse_dcb_gpio_table(bios);
+	parse_dcb_connector_table(bios);
+
+	if (entries > DCB_MAX_NUM_ENTRIES)
+		entries = DCB_MAX_NUM_ENTRIES;
+
+	for (i = 0; i < entries; i++) {
+		uint32_t connection, config = 0;
+
+		connection = ROM32(dcbtable[headerlen + recordlength * i]);
+		if (configblock)
+			config = ROM32(dcbtable[headerlen + confofs + recordlength * i]);
+
+		/* seen on an NV11 with DCB v1.5 */
+		if (connection == 0x00000000)
+			break;
+
+		/* seen on an NV17 with DCB v2.0 */
+		if (connection == 0xffffffff)
+			break;
+
+		if ((connection & 0x0000000f) == 0x0000000f)
+			continue;
+
+		NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n",
+			     dcb->entries, connection, config);
+
+		if (!parse_dcb_entry(dev, bdcb, connection, config))
+			break;
+	}
+
+	/*
+	 * Apart from v2.1+ not being known to require merging, skipping the
+	 * merge guarantees dcbent->index is the index of the entry in the
+	 * rom image.
+	 */
+	if (bdcb->version < 0x21)
+		merge_like_dcb_entries(dev, dcb);
+
+	return dcb->entries ? 0 : -ENXIO;
+}
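+
+/*
+ * Byte layout implied by the parsing above (illustrative summary, valid
+ * for DCB 3.0+): +0x00 version, +0x01 header length, +0x02 entry count,
+ * +0x03 record length, +0x04 i2c table pointer (16 bits), +0x06 signature
+ * 0x4edcbdcb (32 bits), +0x0a gpio table pointer, +0x14 connector table
+ * pointer.  Entries start at <header length>, one per <record length>
+ * bytes, with the conn dword at +0 and the conf dword at +4.
+ */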
+
+static void
+fixup_legacy_connector(struct nvbios *bios)
+{
+	struct bios_parsed_dcb *bdcb = &bios->bdcb;
+	struct parsed_dcb *dcb = &bdcb->dcb;
+	int high = 0, i;
+
+	/*
+	 * DCB 3.0 also has the table in most cases, but there are some cards
+	 * where the table is filled with stub entries, and the DCB entry
+	 * indices are all 0.  We don't need the connector indices on pre-G80
+	 * chips (yet?), so limit the use to DCB 4.0 and above.
+	 */
+	if (bdcb->version >= 0x40)
+		return;
+
+	/*
+	 * No known connector info before v3.0, so make it up.  The rule
+	 * here is: anything on the same i2c bus is considered to be on the
+	 * same connector; any output without an associated i2c bus is
+	 * assigned its own unique connector index.
+	 */
+	for (i = 0; i < dcb->entries; i++) {
+		if (dcb->entry[i].i2c_index == 0xf)
+			continue;
+
+		/*
+		 * Ignore the I2C index for on-chip TV-out, as there
+		 * are cards with bogus values (nv31m in bug 23212),
+		 * and it's otherwise useless.
+		 */
+		if (dcb->entry[i].type == OUTPUT_TV &&
+		    dcb->entry[i].location == DCB_LOC_ON_CHIP) {
+			dcb->entry[i].i2c_index = 0xf;
+			continue;
+		}
+
+		dcb->entry[i].connector = dcb->entry[i].i2c_index;
+		if (dcb->entry[i].connector > high)
+			high = dcb->entry[i].connector;
+	}
+
+	for (i = 0; i < dcb->entries; i++) {
+		if (dcb->entry[i].i2c_index != 0xf)
+			continue;
+
+		dcb->entry[i].connector = ++high;
+	}
+}
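+
+/*
+ * For example (illustrative): given entries on i2c buses 2, 2 and 0xf, the
+ * first two share connector index 2 and the bus-less third is assigned the
+ * next free index, 3.
+ */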
+
+static void
+fixup_legacy_i2c(struct nvbios *bios)
+{
+	struct parsed_dcb *dcb = &bios->bdcb.dcb;
+	int i;
+
+	for (i = 0; i < dcb->entries; i++) {
+		if (dcb->entry[i].i2c_index == LEGACY_I2C_CRT)
+			dcb->entry[i].i2c_index = bios->legacy.i2c_indices.crt;
+		if (dcb->entry[i].i2c_index == LEGACY_I2C_PANEL)
+			dcb->entry[i].i2c_index = bios->legacy.i2c_indices.panel;
+		if (dcb->entry[i].i2c_index == LEGACY_I2C_TV)
+			dcb->entry[i].i2c_index = bios->legacy.i2c_indices.tv;
+	}
+}
+
+static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bios, uint16_t hwsq_offset, int entry)
+{
+	/*
+	 * The header following the "HWSQ" signature has the number of
+	 * entries and the entry size.
+	 *
+	 * An entry consists of a dword to write to the sequencer control reg
+	 * (0x00001304), followed by the ucode bytes, written sequentially,
+	 * starting at reg 0x00001400.
+	 */
+
+	uint8_t bytes_to_write;
+	uint16_t hwsq_entry_offset;
+	int i;
+
+	if (bios->data[hwsq_offset] <= entry) {
+		NV_ERROR(dev, "Too few entries in HW sequencer table for "
+				"requested entry\n");
+		return -ENOENT;
+	}
+
+	bytes_to_write = bios->data[hwsq_offset + 1];
+
+	if (bytes_to_write != 36) {
+		NV_ERROR(dev, "Unknown HW sequencer entry size\n");
+		return -EINVAL;
+	}
+
+	NV_TRACE(dev, "Loading NV17 power sequencing microcode\n");
+
+	hwsq_entry_offset = hwsq_offset + 2 + entry * bytes_to_write;
+
+	/* set sequencer control */
+	bios_wr32(bios, 0x00001304, ROM32(bios->data[hwsq_entry_offset]));
+	bytes_to_write -= 4;
+
+	/* write ucode */
+	for (i = 0; i < bytes_to_write; i += 4)
+		bios_wr32(bios, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4]));
+
+	/* twiddle NV_PBUS_DEBUG_4 */
+	bios_wr32(bios, NV_PBUS_DEBUG_4, bios_rd32(bios, NV_PBUS_DEBUG_4) | 0x18);
+
+	return 0;
+}
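+
+/*
+ * HWSQ entry layout implied by the code above (illustrative): byte 0 of
+ * the table holds the entry count and byte 1 the entry size (36 here).
+ * Each entry is a control dword written to 0x1304 followed by 32 bytes of
+ * ucode written as eight dwords to 0x1400, 0x1404, ..., 0x141c.
+ */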
+
+static int load_nv17_hw_sequencer_ucode(struct drm_device *dev,
+					struct nvbios *bios)
+{
+	/*
+	 * BMP based cards, from NV17, need a microcode loading to correctly
+	 * control the GPIO etc for LVDS panels
+	 *
+	 * BIT based cards seem to do this directly in the init scripts
+	 *
+	 * The microcode entries are found by the "HWSQ" signature.
+	 */
+
+	const uint8_t hwsq_signature[] = { 'H', 'W', 'S', 'Q' };
+	const int sz = sizeof(hwsq_signature);
+	int hwsq_offset;
+
+	hwsq_offset = findstr(bios->data, bios->length, hwsq_signature, sz);
+	if (!hwsq_offset)
+		return 0;
+
+	/* always use entry 0? */
+	return load_nv17_hwsq_ucode_entry(dev, bios, hwsq_offset + sz, 0);
+}
+
+uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->VBIOS;
+	const uint8_t edid_sig[] = {
+			0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
+	uint16_t offset = 0;
+	uint16_t newoffset;
+	int searchlen = NV_PROM_SIZE;
+
+	if (bios->fp.edid)
+		return bios->fp.edid;
+
+	while (searchlen) {
+		newoffset = findstr(&bios->data[offset], searchlen,
+								edid_sig, 8);
+		if (!newoffset)
+			return NULL;
+		offset += newoffset;
+		if (!nv_cksum(&bios->data[offset], EDID1_LEN))
+			break;
+
+		searchlen -= offset;
+		offset++;
+	}
+
+	NV_TRACE(dev, "Found EDID in BIOS\n");
+
+	return bios->fp.edid = &bios->data[offset];
+}
+
+void
+nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
+			    struct dcb_entry *dcbent)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->VBIOS;
+	struct init_exec iexec = { true, false };
+
+	bios->display.output = dcbent;
+	parse_init_table(bios, table, &iexec);
+	bios->display.output = NULL;
+}
+
+static bool NVInitVBIOS(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->VBIOS;
+
+	memset(bios, 0, sizeof(struct nvbios));
+	bios->dev = dev;
+
+	if (!NVShadowVBIOS(dev, bios->data))
+		return false;
+
+	bios->length = NV_PROM_SIZE;
+	return true;
+}
+
+static int nouveau_parse_vbios_struct(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->VBIOS;
+	const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' };
+	const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 };
+	int offset;
+
+	offset = findstr(bios->data, bios->length,
+					bit_signature, sizeof(bit_signature));
+	if (offset) {
+		NV_TRACE(dev, "BIT BIOS found\n");
+		return parse_bit_structure(bios, offset + 6);
+	}
+
+	offset = findstr(bios->data, bios->length,
+					bmp_signature, sizeof(bmp_signature));
+	if (offset) {
+		NV_TRACE(dev, "BMP BIOS found\n");
+		return parse_bmp_structure(dev, bios, offset);
+	}
+
+	NV_ERROR(dev, "No known BIOS signature found\n");
+	return -ENODEV;
+}
+
+int
+nouveau_run_vbios_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->VBIOS;
+	int i, ret = 0;
+
+	NVLockVgaCrtcs(dev, false);
+	if (nv_two_heads(dev))
+		NVSetOwner(dev, bios->state.crtchead);
+
+	if (bios->major_version < 5)	/* BMP only */
+		load_nv17_hw_sequencer_ucode(dev, bios);
+
+	if (bios->execute) {
+		bios->fp.last_script_invoc = 0;
+		bios->fp.lvds_init_run = false;
+	}
+
+	parse_init_tables(bios);
+
+	/*
+	 * Runs some additional script seen on G8x VBIOSen.  The VBIOS'
+	 * parser will run this right after the init tables; the binary
+	 * driver appears to run it at some point later.
+	 */
+	if (bios->some_script_ptr) {
+		struct init_exec iexec = {true, false};
+
+		NV_INFO(dev, "Parsing VBIOS init table at offset 0x%04X\n",
+			bios->some_script_ptr);
+		parse_init_table(bios, bios->some_script_ptr, &iexec);
+	}
+
+	if (dev_priv->card_type >= NV_50) {
+		for (i = 0; i < bios->bdcb.dcb.entries; i++) {
+			nouveau_bios_run_display_table(dev,
+						       &bios->bdcb.dcb.entry[i],
+						       0, 0);
+		}
+	}
+
+	NVLockVgaCrtcs(dev, true);
+
+	return ret;
+}
+
+static void
+nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->VBIOS;
+	struct dcb_i2c_entry *entry;
+	int i;
+
+	entry = &bios->bdcb.dcb.i2c[0];
+	for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++)
+		nouveau_i2c_fini(dev, entry);
+}
+
+int
+nouveau_bios_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->VBIOS;
+	uint32_t saved_nv_pextdev_boot_0;
+	bool was_locked;
+	int ret;
+
+	dev_priv->vbios = &bios->pub;
+
+	if (!NVInitVBIOS(dev))
+		return -ENODEV;
+
+	ret = nouveau_parse_vbios_struct(dev);
+	if (ret)
+		return ret;
+
+	ret = parse_dcb_table(dev, bios, nv_two_heads(dev));
+	if (ret)
+		return ret;
+
+	fixup_legacy_i2c(bios);
+	fixup_legacy_connector(bios);
+
+	if (!bios->major_version)	/* we don't run version 0 bios */
+		return 0;
+
+	/* these will need remembering across a suspend */
+	saved_nv_pextdev_boot_0 = bios_rd32(bios, NV_PEXTDEV_BOOT_0);
+	bios->state.saved_nv_pfb_cfg0 = bios_rd32(bios, NV_PFB_CFG0);
+
+	/* init script execution disabled */
+	bios->execute = false;
+
+	/* ... unless card isn't POSTed already */
+	if (dev_priv->card_type >= NV_10 &&
+	    NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
+	    NVReadVgaCrtc(dev, 0, 0x1a) == 0) {
+		NV_INFO(dev, "Adaptor not initialised\n");
+		if (dev_priv->card_type < NV_50) {
+			NV_ERROR(dev, "Unable to POST this chipset\n");
+			return -ENODEV;
+		}
+
+		NV_INFO(dev, "Running VBIOS init tables\n");
+		bios->execute = true;
+	}
+
+	bios_wr32(bios, NV_PEXTDEV_BOOT_0, saved_nv_pextdev_boot_0);
+
+	ret = nouveau_run_vbios_init(dev);
+	if (ret) {
+		dev_priv->vbios = NULL;
+		return ret;
+	}
+
+	/* feature_byte on BMP is poor, but init always sets CR4B */
+	was_locked = NVLockVgaCrtcs(dev, false);
+	if (bios->major_version < 5)
+		bios->is_mobile = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_4B) & 0x40;
+
+	/* all BIT systems need p_f_m_t for digital_min_front_porch */
+	if (bios->is_mobile || bios->major_version >= 5)
+		ret = parse_fp_mode_table(dev, bios);
+	NVLockVgaCrtcs(dev, was_locked);
+
+	/* allow subsequent scripts to execute */
+	bios->execute = true;
+
+	return 0;
+}
+
+void
+nouveau_bios_takedown(struct drm_device *dev)
+{
+	nouveau_bios_i2c_devices_takedown(dev);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
new file mode 100644
index 0000000..1d5f10b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2007-2008 Nouveau Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_BIOS_H__
+#define __NOUVEAU_BIOS_H__
+
+#include "nvreg.h"
+#include "nouveau_i2c.h"
+
+#define DCB_MAX_NUM_ENTRIES 16
+#define DCB_MAX_NUM_I2C_ENTRIES 16
+#define DCB_MAX_NUM_GPIO_ENTRIES 32
+#define DCB_MAX_NUM_CONNECTOR_ENTRIES 16
+
+#define DCB_LOC_ON_CHIP 0
+
+struct dcb_entry {
+	int index;	/* may not be raw dcb index if merging has happened */
+	uint8_t type;
+	uint8_t i2c_index;
+	uint8_t heads;
+	uint8_t connector;
+	uint8_t bus;
+	uint8_t location;
+	uint8_t or;
+	bool duallink_possible;
+	union {
+		struct sor_conf {
+			int link;
+		} sorconf;
+		struct {
+			int maxfreq;
+		} crtconf;
+		struct {
+			struct sor_conf sor;
+			bool use_straps_for_mode;
+			bool use_power_scripts;
+		} lvdsconf;
+		struct {
+			bool has_component_output;
+		} tvconf;
+		struct {
+			struct sor_conf sor;
+			int link_nr;
+			int link_bw;
+		} dpconf;
+		struct {
+			struct sor_conf sor;
+		} tmdsconf;
+	};
+	bool i2c_upper_default;
+};
+
+struct dcb_i2c_entry {
+	uint8_t port_type;
+	uint8_t read, write;
+	struct nouveau_i2c_chan *chan;
+};
+
+struct parsed_dcb {
+	int entries;
+	struct dcb_entry entry[DCB_MAX_NUM_ENTRIES];
+	struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];
+};
+
+enum dcb_gpio_tag {
+	DCB_GPIO_TVDAC0 = 0xc,
+	DCB_GPIO_TVDAC1 = 0x2d,
+};
+
+struct dcb_gpio_entry {
+	enum dcb_gpio_tag tag;
+	int line;
+	bool invert;
+};
+
+struct parsed_dcb_gpio {
+	int entries;
+	struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
+};
+
+struct dcb_connector_table_entry {
+	uint32_t entry;
+	uint8_t type;
+	uint8_t index;
+	uint8_t gpio_tag;
+};
+
+struct dcb_connector_table {
+	int entries;
+	struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
+};
+
+struct bios_parsed_dcb {
+	uint8_t version;
+
+	struct parsed_dcb dcb;
+
+	uint8_t *i2c_table;
+	uint8_t i2c_default_indices;
+
+	uint16_t gpio_table_ptr;
+	struct parsed_dcb_gpio gpio;
+	uint16_t connector_table_ptr;
+	struct dcb_connector_table connector;
+};
+
+enum nouveau_encoder_type {
+	OUTPUT_ANALOG = 0,
+	OUTPUT_TV = 1,
+	OUTPUT_TMDS = 2,
+	OUTPUT_LVDS = 3,
+	OUTPUT_DP = 6,
+	OUTPUT_ANY = -1
+};
+
+enum nouveau_or {
+	OUTPUT_A = (1 << 0),
+	OUTPUT_B = (1 << 1),
+	OUTPUT_C = (1 << 2)
+};
+
+enum LVDS_script {
+	/* Order *does* matter here */
+	LVDS_INIT = 1,
+	LVDS_RESET,
+	LVDS_BACKLIGHT_ON,
+	LVDS_BACKLIGHT_OFF,
+	LVDS_PANEL_ON,
+	LVDS_PANEL_OFF
+};
+
+/* changing these requires matching changes to reg tables in nv_get_clock */
+#define MAX_PLL_TYPES	4
+enum pll_types {
+	NVPLL,
+	MPLL,
+	VPLL1,
+	VPLL2
+};
+
+struct pll_lims {
+	struct {
+		int minfreq;
+		int maxfreq;
+		int min_inputfreq;
+		int max_inputfreq;
+
+		uint8_t min_m;
+		uint8_t max_m;
+		uint8_t min_n;
+		uint8_t max_n;
+	} vco1, vco2;
+
+	uint8_t max_log2p;
+	/*
+	 * For most pre-nv50 cards setting a log2P of 7 (the common max_log2p
+	 * value) is no different from 6 (at least for vplls), so allowing the
+	 * MNP calc to use 7 causes the generated clock to be out by a factor
+	 * of 2.  However, max_log2p cannot be fixed up during parsing, as the
+	 * unmodified max_log2p value is still needed for setting mplls; hence
+	 * the additional max_usable_log2p member.
+	 */
+	uint8_t max_usable_log2p;
+	uint8_t log2p_bias;
+
+	uint8_t min_p;
+	uint8_t max_p;
+
+	int refclk;
+};
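+
+/*
+ * These limits feed nouveau_calc_pll_mnp() in nouveau_calc.c; roughly
+ * (illustrative), a single-stage PLL produces refclk * N / (M * P) and a
+ * two-stage PLL refclk * (N1 / M1) * (N2 / M2) / P, with each VCO and its
+ * input frequency kept inside the ranges given here.
+ */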
+
+struct nouveau_bios_info {
+	struct parsed_dcb *dcb;
+
+	uint8_t chip_version;
+
+	uint32_t dactestval;
+	uint32_t tvdactestval;
+	uint8_t digital_min_front_porch;
+	bool fp_no_ddc;
+};
+
+struct nvbios {
+	struct drm_device *dev;
+	struct nouveau_bios_info pub;
+
+	uint8_t data[NV_PROM_SIZE];
+	unsigned int length;
+	bool execute;
+
+	uint8_t major_version;
+	uint8_t feature_byte;
+	bool is_mobile;
+
+	uint32_t fmaxvco, fminvco;
+
+	bool old_style_init;
+	uint16_t init_script_tbls_ptr;
+	uint16_t extra_init_script_tbl_ptr;
+	uint16_t macro_index_tbl_ptr;
+	uint16_t macro_tbl_ptr;
+	uint16_t condition_tbl_ptr;
+	uint16_t io_condition_tbl_ptr;
+	uint16_t io_flag_condition_tbl_ptr;
+	uint16_t init_function_tbl_ptr;
+
+	uint16_t pll_limit_tbl_ptr;
+	uint16_t ram_restrict_tbl_ptr;
+
+	uint16_t some_script_ptr; /* BIT I + 14 */
+	uint16_t init96_tbl_ptr; /* BIT I + 16 */
+
+	struct bios_parsed_dcb bdcb;
+
+	struct {
+		int crtchead;
+		/* these need remembering across suspend */
+		uint32_t saved_nv_pfb_cfg0;
+	} state;
+
+	struct {
+		struct dcb_entry *output;
+		uint16_t script_table_ptr;
+		uint16_t dp_table_ptr;
+	} display;
+
+	struct {
+		uint16_t fptablepointer;	/* also used by tmds */
+		uint16_t fpxlatetableptr;
+		int xlatwidth;
+		uint16_t lvdsmanufacturerpointer;
+		uint16_t fpxlatemanufacturertableptr;
+		uint16_t mode_ptr;
+		uint16_t xlated_entry;
+		bool power_off_for_reset;
+		bool reset_after_pclk_change;
+		bool dual_link;
+		bool link_c_increment;
+		bool BITbit1;
+		bool if_is_24bit;
+		int duallink_transition_clk;
+		uint8_t strapless_is_24bit;
+		uint8_t *edid;
+
+		/* will need resetting after suspend */
+		int last_script_invoc;
+		bool lvds_init_run;
+	} fp;
+
+	struct {
+		uint16_t output0_script_ptr;
+		uint16_t output1_script_ptr;
+	} tmds;
+
+	struct {
+		uint16_t mem_init_tbl_ptr;
+		uint16_t sdr_seq_tbl_ptr;
+		uint16_t ddr_seq_tbl_ptr;
+
+		struct {
+			uint8_t crt, tv, panel;
+		} i2c_indices;
+
+		uint16_t lvds_single_a_script_ptr;
+	} legacy;
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
new file mode 100644
index 0000000..320a14b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -0,0 +1,671 @@
+/*
+ * Copyright 2007 Dave Airlied
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * Authors: Dave Airlied <airlied@linux.ie>
+ *	    Ben Skeggs   <darktama@iinet.net.au>
+ *	    Jeremy Kolb  <jkolb@brandeis.edu>
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+
+static void
+nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+	ttm_bo_kunmap(&nvbo->kmap);
+
+	if (unlikely(nvbo->gem))
+		DRM_ERROR("bo %p still attached to GEM object\n", bo);
+
+	spin_lock(&dev_priv->ttm.bo_list_lock);
+	list_del(&nvbo->head);
+	spin_unlock(&dev_priv->ttm.bo_list_lock);
+	kfree(nvbo);
+}
+
+int
+nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
+	       int size, int align, uint32_t flags, uint32_t tile_mode,
+	       uint32_t tile_flags, bool no_vm, bool mappable,
+	       struct nouveau_bo **pnvbo)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_bo *nvbo;
+	int ret, n = 0;
+
+	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
+	if (!nvbo)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&nvbo->head);
+	INIT_LIST_HEAD(&nvbo->entry);
+	nvbo->mappable = mappable;
+	nvbo->no_vm = no_vm;
+	nvbo->tile_mode = tile_mode;
+	nvbo->tile_flags = tile_flags;
+
+	/*
+	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
+	 * align to that as well as to the page size.  Overallocate memory to
+	 * avoid corruption of other buffer objects.
+	 */
+	switch (tile_flags) {
+	case 0x1800:
+	case 0x2800:
+	case 0x4800:
+	case 0x7a00:
+		if (dev_priv->chipset >= 0xA0) {
+			/* This is based on high-end cards with a 448-bit
+			 * memory bus; could be different elsewhere. */
+			size += 6 * 28672;
+			/* 8 * 28672 is the actual alignment requirement,
+			 * but we must also align to page size. */
+			align = 2 * 8 * 28672;
+		} else if (dev_priv->chipset >= 0x90) {
+			size += 3 * 16384;
+			align = 12 * 16384;
+		} else {
+			size += 3 * 8192;
+			/* 12 * 8192 is the actual alignment requirement,
+			 * but we must also align to page size. */
+			align = 2 * 12 * 8192;
+		}
+		break;
+	default:
+		break;
+	}
+
+	align >>= PAGE_SHIFT;
+
+	size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
+	if (dev_priv->card_type == NV_50) {
+		size = (size + 65535) & ~65535;
+		if (align < (65536 / PAGE_SIZE))
+			align = (65536 / PAGE_SIZE);
+	}
+
+	if (flags & TTM_PL_FLAG_VRAM)
+		nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
+	if (flags & TTM_PL_FLAG_TT)
+		nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+	nvbo->placement.fpfn = 0;
+	nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
+	nvbo->placement.placement = nvbo->placements;
+	nvbo->placement.busy_placement = nvbo->placements;
+	nvbo->placement.num_placement = n;
+	nvbo->placement.num_busy_placement = n;
+
+	nvbo->channel = chan;
+	nouveau_bo_placement_set(nvbo, flags);
+	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
+			  ttm_bo_type_device, &nvbo->placement, align, 0,
+			  false, NULL, size, nouveau_bo_del_ttm);
+	nvbo->channel = NULL;
+	if (ret) {
+		/* ttm will call nouveau_bo_del_ttm if it fails. */
+		return ret;
+	}
+
+	spin_lock(&dev_priv->ttm.bo_list_lock);
+	list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
+	spin_unlock(&dev_priv->ttm.bo_list_lock);
+	*pnvbo = nvbo;
+	return 0;
+}
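+
+/*
+ * Usage sketch (illustrative, mirroring what
+ * nouveau_channel_user_pushbuf_alloc() in nouveau_channel.c does):
+ *
+ *	struct nouveau_bo *nvbo = NULL;
+ *	int ret;
+ *
+ *	ret = nouveau_bo_new(dev, NULL, 65536, 0, TTM_PL_FLAG_VRAM,
+ *			     0, 0x0000, false, true, &nvbo);
+ *	if (ret == 0)
+ *		ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
+ *	if (ret == 0)
+ *		ret = nouveau_bo_map(nvbo);
+ *
+ * which yields a pinned, CPU-mapped 64KiB buffer in VRAM.
+ */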
+
+void
+nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype)
+{
+	int n = 0;
+
+	if (memtype & TTM_PL_FLAG_VRAM)
+		nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
+	if (memtype & TTM_PL_FLAG_TT)
+		nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+	if (memtype & TTM_PL_FLAG_SYSTEM)
+		nvbo->placements[n++] = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
+	nvbo->placement.placement = nvbo->placements;
+	nvbo->placement.busy_placement = nvbo->placements;
+	nvbo->placement.num_placement = n;
+	nvbo->placement.num_busy_placement = n;
+}
+
+int
+nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+	struct ttm_buffer_object *bo = &nvbo->bo;
+	int ret, i;
+
+	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
+		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
+			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
+			 1 << bo->mem.mem_type, memtype);
+		return -EINVAL;
+	}
+
+	if (nvbo->pin_refcnt++)
+		return 0;
+
+	ret = ttm_bo_reserve(bo, false, false, false, 0);
+	if (ret)
+		goto out;
+
+	nouveau_bo_placement_set(nvbo, memtype);
+	for (i = 0; i < nvbo->placement.num_placement; i++)
+		nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
+	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+	if (ret == 0) {
+		switch (bo->mem.mem_type) {
+		case TTM_PL_VRAM:
+			dev_priv->fb_aper_free -= bo->mem.size;
+			break;
+		case TTM_PL_TT:
+			dev_priv->gart_info.aper_free -= bo->mem.size;
+			break;
+		default:
+			break;
+		}
+	}
+	ttm_bo_unreserve(bo);
+out:
+	if (unlikely(ret))
+		nvbo->pin_refcnt--;
+	return ret;
+}
+
+int
+nouveau_bo_unpin(struct nouveau_bo *nvbo)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+	struct ttm_buffer_object *bo = &nvbo->bo;
+	int ret, i;
+
+	if (--nvbo->pin_refcnt)
+		return 0;
+
+	ret = ttm_bo_reserve(bo, false, false, false, 0);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < nvbo->placement.num_placement; i++)
+		nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+
+	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+	if (ret == 0) {
+		switch (bo->mem.mem_type) {
+		case TTM_PL_VRAM:
+			dev_priv->fb_aper_free += bo->mem.size;
+			break;
+		case TTM_PL_TT:
+			dev_priv->gart_info.aper_free += bo->mem.size;
+			break;
+		default:
+			break;
+		}
+	}
+
+	ttm_bo_unreserve(bo);
+	return ret;
+}
+
+int
+nouveau_bo_map(struct nouveau_bo *nvbo)
+{
+	int ret;
+
+	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
+	ttm_bo_unreserve(&nvbo->bo);
+	return ret;
+}
+
+void
+nouveau_bo_unmap(struct nouveau_bo *nvbo)
+{
+	ttm_bo_kunmap(&nvbo->kmap);
+}
+
+u16
+nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
+{
+	bool is_iomem;
+	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
+	mem = &mem[index];
+	if (is_iomem)
+		return ioread16_native((void __force __iomem *)mem);
+	else
+		return *mem;
+}
+
+void
+nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
+{
+	bool is_iomem;
+	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
+	mem = &mem[index];
+	if (is_iomem)
+		iowrite16_native(val, (void __force __iomem *)mem);
+	else
+		*mem = val;
+}
+
+u32
+nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
+{
+	bool is_iomem;
+	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
+	mem = &mem[index];
+	if (is_iomem)
+		return ioread32_native((void __force __iomem *)mem);
+	else
+		return *mem;
+}
+
+void
+nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
+{
+	bool is_iomem;
+	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
+	mem = &mem[index];
+	if (is_iomem)
+		iowrite32_native(val, (void __force __iomem *)mem);
+	else
+		*mem = val;
+}
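+
+/*
+ * The four accessors above let callers poke a mapped bo without caring
+ * whether ttm_bo_kmap() returned an ioremapped or a system-memory pointer;
+ * e.g. (illustrative) nouveau_bo_wr32(nvbo, 0, 0xcafebabe) stores the
+ * first dword with iowrite32_native() or a plain store as appropriate.
+ */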
+
+static struct ttm_backend *
+nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+	struct drm_device *dev = dev_priv->dev;
+
+	switch (dev_priv->gart_info.type) {
+	case NOUVEAU_GART_AGP:
+		return ttm_agp_backend_init(bdev, dev->agp->bridge);
+	case NOUVEAU_GART_SGDMA:
+		return nouveau_sgdma_init_ttm(dev);
+	default:
+		NV_ERROR(dev, "Unknown GART type %d\n",
+			 dev_priv->gart_info.type);
+		break;
+	}
+
+	return NULL;
+}
+
+static int
+nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+{
+	/* We'll do this from user space. */
+	return 0;
+}
+
+static int
+nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+			 struct ttm_mem_type_manager *man)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+	struct drm_device *dev = dev_priv->dev;
+
+	switch (type) {
+	case TTM_PL_SYSTEM:
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case TTM_PL_VRAM:
+		man->flags = TTM_MEMTYPE_FLAG_FIXED |
+			     TTM_MEMTYPE_FLAG_MAPPABLE |
+			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+		man->available_caching = TTM_PL_FLAG_UNCACHED |
+					 TTM_PL_FLAG_WC;
+		man->default_caching = TTM_PL_FLAG_WC;
+
+		man->io_addr = NULL;
+		man->io_offset = drm_get_resource_start(dev, 1);
+		man->io_size = drm_get_resource_len(dev, 1);
+		if (man->io_size > nouveau_mem_fb_amount(dev))
+			man->io_size = nouveau_mem_fb_amount(dev);
+
+		man->gpu_offset = dev_priv->vm_vram_base;
+		break;
+	case TTM_PL_TT:
+		switch (dev_priv->gart_info.type) {
+		case NOUVEAU_GART_AGP:
+			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
+				     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+			man->available_caching = TTM_PL_FLAG_UNCACHED;
+			man->default_caching = TTM_PL_FLAG_UNCACHED;
+			break;
+		case NOUVEAU_GART_SGDMA:
+			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
+				     TTM_MEMTYPE_FLAG_CMA;
+			man->available_caching = TTM_PL_MASK_CACHING;
+			man->default_caching = TTM_PL_FLAG_CACHED;
+			break;
+		default:
+			NV_ERROR(dev, "Unknown GART type: %d\n",
+				 dev_priv->gart_info.type);
+			return -EINVAL;
+		}
+
+		man->io_offset  = dev_priv->gart_info.aper_base;
+		man->io_size    = dev_priv->gart_info.aper_size;
+		man->io_addr    = NULL;
+		man->gpu_offset = dev_priv->vm_gart_base;
+		break;
+	default:
+		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void
+nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+	switch (bo->mem.mem_type) {
+	default:
+		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);
+		break;
+	}
+}
+
+/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
+ * TTM_PL_{VRAM,TT} directly.
+ */
+static int
+nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
+			      struct nouveau_bo *nvbo, bool evict, bool no_wait,
+			      struct ttm_mem_reg *new_mem)
+{
+	struct nouveau_fence *fence = NULL;
+	int ret;
+
+	ret = nouveau_fence_new(chan, &fence, true);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
+					evict, no_wait, new_mem);
+	nouveau_fence_unref((void *)&fence);
+	return ret;
+}
+
+static inline uint32_t
+nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
+		      struct ttm_mem_reg *mem)
+{
+	if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
+		if (mem->mem_type == TTM_PL_TT)
+			return NvDmaGART;
+		return NvDmaVRAM;
+	}
+
+	if (mem->mem_type == TTM_PL_TT)
+		return chan->gart_handle;
+	return chan->vram_handle;
+}
+
+static int
+nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, int no_wait,
+		     struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct nouveau_channel *chan;
+	uint64_t src_offset, dst_offset;
+	uint32_t page_count;
+	int ret;
+
+	chan = nvbo->channel;
+	if (!chan || nvbo->tile_flags || nvbo->no_vm) {
+		chan = dev_priv->channel;
+		if (!chan)
+			return -EINVAL;
+	}
+
+	src_offset = old_mem->mm_node->start << PAGE_SHIFT;
+	dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
+	if (chan != dev_priv->channel) {
+		if (old_mem->mem_type == TTM_PL_TT)
+			src_offset += dev_priv->vm_gart_base;
+		else
+			src_offset += dev_priv->vm_vram_base;
+
+		if (new_mem->mem_type == TTM_PL_TT)
+			dst_offset += dev_priv->vm_gart_base;
+		else
+			dst_offset += dev_priv->vm_vram_base;
+	}
+
+	ret = RING_SPACE(chan, 3);
+	if (ret)
+		return ret;
+	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
+	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
+	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));
+
+	if (dev_priv->card_type >= NV_50) {
+		ret = RING_SPACE(chan, 4);
+		if (ret)
+			return ret;
+		BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
+		OUT_RING(chan, 1);
+		BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
+		OUT_RING(chan, 1);
+	}
+
+	page_count = new_mem->num_pages;
+	while (page_count) {
+		int line_count = (page_count > 2047) ? 2047 : page_count;
+
+		if (dev_priv->card_type >= NV_50) {
+			ret = RING_SPACE(chan, 3);
+			if (ret)
+				return ret;
+			BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
+			OUT_RING(chan, upper_32_bits(src_offset));
+			OUT_RING(chan, upper_32_bits(dst_offset));
+		}
+		ret = RING_SPACE(chan, 11);
+		if (ret)
+			return ret;
+		BEGIN_RING(chan, NvSubM2MF,
+				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
+		OUT_RING(chan, lower_32_bits(src_offset));
+		OUT_RING(chan, lower_32_bits(dst_offset));
+		OUT_RING(chan, PAGE_SIZE); /* src_pitch */
+		OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
+		OUT_RING(chan, PAGE_SIZE); /* line_length */
+		OUT_RING(chan, line_count);
+		OUT_RING(chan, (1<<8)|(1<<0));
+		OUT_RING(chan, 0);
+		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+		OUT_RING(chan, 0);
+
+		page_count -= line_count;
+		src_offset += (PAGE_SIZE * line_count);
+		dst_offset += (PAGE_SIZE * line_count);
+	}
+
+	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
+}
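+
+/*
+ * Copy arithmetic above, worked through (illustrative): each M2MF pass
+ * moves up to 2047 lines of PAGE_SIZE bytes, so a 16MiB bo (4096 pages
+ * with 4KiB pages) takes three passes of 2047, 2047 and 2 lines.
+ */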
+
+static int
+nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
+		      bool no_wait, struct ttm_mem_reg *new_mem)
+{
+	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+	struct ttm_placement placement;
+	struct ttm_mem_reg tmp_mem;
+	int ret;
+
+	placement.fpfn = placement.lpfn = 0;
+	placement.num_placement = placement.num_busy_placement = 1;
+	placement.placement = &placement_memtype;
+
+	tmp_mem = *new_mem;
+	tmp_mem.mm_node = NULL;
+	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+	if (ret)
+		return ret;
+
+	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
+	if (ret)
+		goto out;
+
+	ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, &tmp_mem);
+	if (ret)
+		goto out;
+
+	ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
+out:
+	if (tmp_mem.mm_node) {
+		spin_lock(&bo->bdev->glob->lru_lock);
+		drm_mm_put_block(tmp_mem.mm_node);
+		spin_unlock(&bo->bdev->glob->lru_lock);
+	}
+
+	return ret;
+}
+
+static int
+nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
+		      bool no_wait, struct ttm_mem_reg *new_mem)
+{
+	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+	struct ttm_placement placement;
+	struct ttm_mem_reg tmp_mem;
+	int ret;
+
+	placement.fpfn = placement.lpfn = 0;
+	placement.num_placement = placement.num_busy_placement = 1;
+	placement.placement = &placement_memtype;
+
+	tmp_mem = *new_mem;
+	tmp_mem.mm_node = NULL;
+	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
+	if (ret)
+		goto out;
+
+	ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, new_mem);
+	if (ret)
+		goto out;
+
+out:
+	if (tmp_mem.mm_node) {
+		spin_lock(&bo->bdev->glob->lru_lock);
+		drm_mm_put_block(tmp_mem.mm_node);
+		spin_unlock(&bo->bdev->glob->lru_lock);
+	}
+
+	return ret;
+}
+
+static int
+nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
+		bool no_wait, struct ttm_mem_reg *new_mem)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct drm_device *dev = dev_priv->dev;
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	int ret;
+
+	if (dev_priv->card_type == NV_50 && new_mem->mem_type == TTM_PL_VRAM &&
+	    !nvbo->no_vm) {
+		uint64_t offset = new_mem->mm_node->start << PAGE_SHIFT;
+
+		ret = nv50_mem_vm_bind_linear(dev,
+					      offset + dev_priv->vm_vram_base,
+					      new_mem->size, nvbo->tile_flags,
+					      offset);
+		if (ret)
+			return ret;
+	}
+
+	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE)
+		return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+
+	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
+		BUG_ON(bo->mem.mm_node != NULL);
+		bo->mem = *new_mem;
+		new_mem->mm_node = NULL;
+		return 0;
+	}
+
+	if (new_mem->mem_type == TTM_PL_SYSTEM) {
+		if (old_mem->mem_type == TTM_PL_SYSTEM)
+			return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+		if (nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem))
+			return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+	} else if (old_mem->mem_type == TTM_PL_SYSTEM) {
+		if (nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem))
+			return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+	} else {
+		if (nouveau_bo_move_m2mf(bo, evict, no_wait, old_mem, new_mem))
+			return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+	}
+
+	return 0;
+}
+
+static int
+nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+	return 0;
+}
+
+struct ttm_bo_driver nouveau_bo_driver = {
+	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
+	.invalidate_caches = nouveau_bo_invalidate_caches,
+	.init_mem_type = nouveau_bo_init_mem_type,
+	.evict_flags = nouveau_bo_evict_flags,
+	.move = nouveau_bo_move,
+	.verify_access = nouveau_bo_verify_access,
+	.sync_obj_signaled = nouveau_fence_signalled,
+	.sync_obj_wait = nouveau_fence_wait,
+	.sync_obj_flush = nouveau_fence_flush,
+	.sync_obj_unref = nouveau_fence_unref,
+	.sync_obj_ref = nouveau_fence_ref,
+};
diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c
new file mode 100644
index 0000000..ee2b845
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_calc.c
@@ -0,0 +1,478 @@
+/*
+ * Copyright 1993-2003 NVIDIA, Corporation
+ * Copyright 2007-2009 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+
+/****************************************************************************\
+*                                                                            *
+* The video arbitration routines calculate some "magic" numbers.  They fix   *
+* the snow seen when accessing the framebuffer without them.                 *
+* It just works (I hope).                                                    *
+*                                                                            *
+\****************************************************************************/
+
+struct nv_fifo_info {
+	int lwm;
+	int burst;
+};
+
+struct nv_sim_state {
+	int pclk_khz;
+	int mclk_khz;
+	int nvclk_khz;
+	int bpp;
+	int mem_page_miss;
+	int mem_latency;
+	int memory_type;
+	int memory_width;
+	int two_heads;
+};
+
+static void
+nv04_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
+{
+	int pagemiss, cas, width, bpp;
+	int nvclks, mclks, pclks, crtpagemiss;
+	int found, mclk_extra, mclk_loop, cbs, m1, p1;
+	int mclk_freq, pclk_freq, nvclk_freq;
+	int us_m, us_n, us_p, crtc_drain_rate;
+	int cpm_us, us_crt, clwm;
+
+	pclk_freq = arb->pclk_khz;
+	mclk_freq = arb->mclk_khz;
+	nvclk_freq = arb->nvclk_khz;
+	pagemiss = arb->mem_page_miss;
+	cas = arb->mem_latency;
+	width = arb->memory_width >> 6;
+	bpp = arb->bpp;
+	cbs = 128;
+
+	pclks = 2;
+	nvclks = 10;
+	mclks = 13 + cas;
+	mclk_extra = 3;
+	found = 0;
+
+	while (!found) {
+		found = 1;
+
+		mclk_loop = mclks + mclk_extra;
+		us_m = mclk_loop * 1000 * 1000 / mclk_freq;
+		us_n = nvclks * 1000 * 1000 / nvclk_freq;
+		us_p = nvclks * 1000 * 1000 / pclk_freq;
+
+		crtc_drain_rate = pclk_freq * bpp / 8;
+		crtpagemiss = 2;
+		crtpagemiss += 1;
+		cpm_us = crtpagemiss * pagemiss * 1000 * 1000 / mclk_freq;
+		us_crt = cpm_us + us_m + us_n + us_p;
+		clwm = us_crt * crtc_drain_rate / (1000 * 1000);
+		clwm++;
+
+		m1 = clwm + cbs - 512;
+		p1 = m1 * pclk_freq / mclk_freq;
+		p1 = p1 * bpp / 8;
+		if ((p1 < m1 && m1 > 0) || clwm > 519) {
+			found = !mclk_extra;
+			mclk_extra--;
+		}
+		if (clwm < 384)
+			clwm = 384;
+
+		fifo->lwm = clwm;
+		fifo->burst = cbs;
+	}
+}
+
+static void
+nv10_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
+{
+	int fill_rate, drain_rate;
+	int pclks, nvclks, mclks, xclks;
+	int pclk_freq, nvclk_freq, mclk_freq;
+	int fill_lat, extra_lat;
+	int max_burst_o, max_burst_l;
+	int fifo_len, min_lwm, max_lwm;
+	const int burst_lat = 80; /* Maximum allowable latency due
+				   * to the CRTC FIFO burst. (ns) */
+
+	pclk_freq = arb->pclk_khz;
+	nvclk_freq = arb->nvclk_khz;
+	mclk_freq = arb->mclk_khz;
+
+	fill_rate = mclk_freq * arb->memory_width / 8; /* kB/s */
+	drain_rate = pclk_freq * arb->bpp / 8; /* kB/s */
+
+	fifo_len = arb->two_heads ? 1536 : 1024; /* B */
+
+	/* Fixed FIFO refill latency. */
+
+	pclks = 4;	/* lwm detect. */
+
+	nvclks = 3	/* lwm -> sync. */
+		+ 2	/* fbi bus cycles (1 req + 1 busy) */
+		+ 1	/* 2 edge sync.  may be very close to edge so
+			 * just put one. */
+		+ 1	/* fbi_d_rdv_n */
+		+ 1	/* Fbi_d_rdata */
+		+ 1;	/* crtfifo load */
+
+	mclks = 1	/* 2 edge sync.  may be very close to edge so
+			 * just put one. */
+		+ 1	/* arb_hp_req */
+		+ 5	/* tiling pipeline */
+		+ 2	/* latency fifo */
+		+ 2	/* memory request to fbio block */
+		+ 7;	/* data returned from fbio block */
+
+	/* Need to accumulate 256 bits for read */
+	mclks += (arb->memory_type == 0 ? 2 : 1)
+		* arb->memory_width / 32;
+
+	fill_lat = mclks * 1000 * 1000 / mclk_freq   /* minimum mclk latency */
+		+ nvclks * 1000 * 1000 / nvclk_freq  /* nvclk latency */
+		+ pclks * 1000 * 1000 / pclk_freq;   /* pclk latency */
+
+	/* Conditional FIFO refill latency. */
+
+	xclks = 2 * arb->mem_page_miss + mclks /* Extra latency due to
+						* the overlay. */
+		+ 2 * arb->mem_page_miss       /* Extra pagemiss latency. */
+		+ (arb->bpp == 32 ? 8 : 4);    /* Margin of error. */
+
+	extra_lat = xclks * 1000 * 1000 / mclk_freq;
+
+	if (arb->two_heads)
+		/* Account for another CRTC. */
+		extra_lat += fill_lat + extra_lat + burst_lat;
+
+	/* FIFO burst */
+
+	/* Max burst not leading to overflows. */
+	max_burst_o = (1 + fifo_len - extra_lat * drain_rate / (1000 * 1000))
+		* (fill_rate / 1000) / ((fill_rate - drain_rate) / 1000);
+	fifo->burst = min(max_burst_o, 1024);
+
+	/* Max burst value with an acceptable latency. */
+	max_burst_l = burst_lat * fill_rate / (1000 * 1000);
+	fifo->burst = min(max_burst_l, fifo->burst);
+
+	fifo->burst = rounddown_pow_of_two(fifo->burst);
+
+	/* FIFO low watermark */
+
+	min_lwm = (fill_lat + extra_lat) * drain_rate / (1000 * 1000) + 1;
+	max_lwm = fifo_len - fifo->burst
+		+ fill_lat * drain_rate / (1000 * 1000)
+		+ fifo->burst * drain_rate / fill_rate;
+
+	fifo->lwm = min_lwm + 10 * (max_lwm - min_lwm) / 100; /* Empirical. */
+}
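+
+/*
+ * Illustrative numbers for the above: with mclk at 100000kHz on a 128-bit
+ * bus, fill_rate is 1600000 kB/s; a 100000kHz pixel clock at 32bpp drains
+ * 400000 kB/s, and burst/lwm are then sized so the CRTC FIFO neither
+ * overflows nor underruns at those rates.
+ */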
+
+static void
+nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
+		int *burst, int *lwm)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv_fifo_info fifo_data;
+	struct nv_sim_state sim_data;
+	int MClk = nouveau_hw_get_clock(dev, MPLL);
+	int NVClk = nouveau_hw_get_clock(dev, NVPLL);
+	uint32_t cfg1 = nvReadFB(dev, NV_PFB_CFG1);
+
+	sim_data.pclk_khz = VClk;
+	sim_data.mclk_khz = MClk;
+	sim_data.nvclk_khz = NVClk;
+	sim_data.bpp = bpp;
+	sim_data.two_heads = nv_two_heads(dev);
+	if ((dev->pci_device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
+	    (dev->pci_device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
+		uint32_t type;
+
+		pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type);
+
+		sim_data.memory_type = (type >> 12) & 1;
+		sim_data.memory_width = 64;
+		sim_data.mem_latency = 3;
+		sim_data.mem_page_miss = 10;
+	} else {
+		sim_data.memory_type = nvReadFB(dev, NV_PFB_CFG0) & 0x1;
+		sim_data.memory_width = (nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64;
+		sim_data.mem_latency = cfg1 & 0xf;
+		sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1);
+	}
+
+	if (dev_priv->card_type == NV_04)
+		nv04_calc_arb(&fifo_data, &sim_data);
+	else
+		nv10_calc_arb(&fifo_data, &sim_data);
+
+	*burst = ilog2(fifo_data.burst >> 4);
+	*lwm = fifo_data.lwm >> 3;
+}
+
+static void
+nv30_update_arb(int *burst, int *lwm)
+{
+	unsigned int fifo_size, burst_size, graphics_lwm;
+
+	fifo_size = 2048;
+	burst_size = 512;
+	graphics_lwm = fifo_size - burst_size;
+
+	*burst = ilog2(burst_size >> 5);
+	*lwm = graphics_lwm >> 3;
+}
+
+void
+nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->card_type < NV_30)
+		nv04_update_arb(dev, vclk, bpp, burst, lwm);
+	else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
+		 (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
+		*burst = 128;
+		*lwm = 0x0480;
+	} else
+		nv30_update_arb(burst, lwm);
+}
+
+static int
+getMNP_single(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
+	      struct nouveau_pll_vals *bestpv)
+{
+	/* Find M, N and P for a single stage PLL
+	 *
+	 * Note that some bioses (NV3x) have lookup tables of precomputed MNP
+	 * values, but we're too lazy to use those atm
+	 *
+	 * "clk" parameter in kHz
+	 * returns calculated clock
+	 */
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int cv = dev_priv->vbios->chip_version;
+	int minvco = pll_lim->vco1.minfreq, maxvco = pll_lim->vco1.maxfreq;
+	int minM = pll_lim->vco1.min_m, maxM = pll_lim->vco1.max_m;
+	int minN = pll_lim->vco1.min_n, maxN = pll_lim->vco1.max_n;
+	int minU = pll_lim->vco1.min_inputfreq;
+	int maxU = pll_lim->vco1.max_inputfreq;
+	int minP = pll_lim->max_p ? pll_lim->min_p : 0;
+	int maxP = pll_lim->max_p ? pll_lim->max_p : pll_lim->max_usable_log2p;
+	int crystal = pll_lim->refclk;
+	int M, N, thisP, P;
+	int clkP, calcclk;
+	int delta, bestdelta = INT_MAX;
+	int bestclk = 0;
+
+	/* this division verified for nv20, nv18, nv28 (Haiku), and nv34 */
+	/* possibly correlated with introduction of 27MHz crystal */
+	if (dev_priv->card_type < NV_50) {
+		if (cv < 0x17 || cv == 0x1a || cv == 0x20) {
+			if (clk > 250000)
+				maxM = 6;
+			if (clk > 340000)
+				maxM = 2;
+		} else if (cv < 0x40) {
+			if (clk > 150000)
+				maxM = 6;
+			if (clk > 200000)
+				maxM = 4;
+			if (clk > 340000)
+				maxM = 2;
+		}
+	}
+
+	P = pll_lim->max_p ? maxP : (1 << maxP);
+	if ((clk * P) < minvco) {
+		minvco = clk * maxP;
+		maxvco = minvco * 2;
+	}
+
+	if (clk + clk/200 > maxvco)	/* +0.5% */
+		maxvco = clk + clk/200;
+
+	/* NV34 goes maxlog2P->0, NV20 goes 0->maxlog2P */
+	for (thisP = minP; thisP <= maxP; thisP++) {
+		P = pll_lim->max_p ? thisP : (1 << thisP);
+		clkP = clk * P;
+
+		if (clkP < minvco)
+			continue;
+		if (clkP > maxvco)
+			return bestclk;
+
+		for (M = minM; M <= maxM; M++) {
+			if (crystal/M < minU)
+				return bestclk;
+			if (crystal/M > maxU)
+				continue;
+
+			/* add crystal/2 to round better */
+			N = (clkP * M + crystal/2) / crystal;
+
+			if (N < minN)
+				continue;
+			if (N > maxN)
+				break;
+
+			/* more rounding additions */
+			calcclk = ((N * crystal + P/2) / P + M/2) / M;
+			delta = abs(calcclk - clk);
+			/* we do an exhaustive search rather than terminating
+			 * on an optimality condition...
+			 */
+			if (delta < bestdelta) {
+				bestdelta = delta;
+				bestclk = calcclk;
+				bestpv->N1 = N;
+				bestpv->M1 = M;
+				bestpv->log2P = thisP;
+				if (delta == 0)	/* except this one */
+					return bestclk;
+			}
+		}
+	}
+
+	return bestclk;
+}
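+
+/*
+ * Worked example for getMNP_single (illustrative, assuming the vco and
+ * input-frequency limits permit): crystal 13500kHz, target clk 100000kHz,
+ * P = 4 gives clkP = 400000; with M = 3, N = (400000 * 3 + 6750) / 13500
+ * = 89, and calcclk comes to 100125kHz -- a 125kHz delta the exhaustive
+ * search may still improve on.
+ */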
+
+static int
+getMNP_double(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
+	      struct nouveau_pll_vals *bestpv)
+{
+	/* Find M, N and P for a two stage PLL
+	 *
+	 * Note that some bioses (NV30+) have lookup tables of precomputed MNP
+	 * values, but we're too lazy to use those atm
+	 *
+	 * "clk" parameter in kHz
+	 * returns calculated clock
+	 */
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int chip_version = dev_priv->vbios->chip_version;
+	int minvco1 = pll_lim->vco1.minfreq, maxvco1 = pll_lim->vco1.maxfreq;
+	int minvco2 = pll_lim->vco2.minfreq, maxvco2 = pll_lim->vco2.maxfreq;
+	int minU1 = pll_lim->vco1.min_inputfreq, minU2 = pll_lim->vco2.min_inputfreq;
+	int maxU1 = pll_lim->vco1.max_inputfreq, maxU2 = pll_lim->vco2.max_inputfreq;
+	int minM1 = pll_lim->vco1.min_m, maxM1 = pll_lim->vco1.max_m;
+	int minN1 = pll_lim->vco1.min_n, maxN1 = pll_lim->vco1.max_n;
+	int minM2 = pll_lim->vco2.min_m, maxM2 = pll_lim->vco2.max_m;
+	int minN2 = pll_lim->vco2.min_n, maxN2 = pll_lim->vco2.max_n;
+	int maxlog2P = pll_lim->max_usable_log2p;
+	int crystal = pll_lim->refclk;
+	bool fixedgain2 = (minM2 == maxM2 && minN2 == maxN2);
+	int M1, N1, M2, N2, log2P;
+	int clkP, calcclk1, calcclk2, calcclkout;
+	int delta, bestdelta = INT_MAX;
+	int bestclk = 0;
+
+	int vco2 = (maxvco2 - maxvco2/200) / 2;
+	for (log2P = 0; clk && log2P < maxlog2P && clk <= (vco2 >> log2P); log2P++)
+		;
+	clkP = clk << log2P;
+
+	if (maxvco2 < clk + clk/200)	/* +0.5% */
+		maxvco2 = clk + clk/200;
+
+	for (M1 = minM1; M1 <= maxM1; M1++) {
+		if (crystal/M1 < minU1)
+			return bestclk;
+		if (crystal/M1 > maxU1)
+			continue;
+
+		for (N1 = minN1; N1 <= maxN1; N1++) {
+			calcclk1 = crystal * N1 / M1;
+			if (calcclk1 < minvco1)
+				continue;
+			if (calcclk1 > maxvco1)
+				break;
+
+			for (M2 = minM2; M2 <= maxM2; M2++) {
+				if (calcclk1/M2 < minU2)
+					break;
+				if (calcclk1/M2 > maxU2)
+					continue;
+
+				/* add calcclk1/2 to round better */
+				N2 = (clkP * M2 + calcclk1/2) / calcclk1;
+				if (N2 < minN2)
+					continue;
+				if (N2 > maxN2)
+					break;
+
+				if (!fixedgain2) {
+					if (chip_version < 0x60)
+						if (N2/M2 < 4 || N2/M2 > 10)
+							continue;
+
+					calcclk2 = calcclk1 * N2 / M2;
+					if (calcclk2 < minvco2)
+						break;
+					if (calcclk2 > maxvco2)
+						continue;
+				} else
+					calcclk2 = calcclk1;
+
+				calcclkout = calcclk2 >> log2P;
+				delta = abs(calcclkout - clk);
+				/* we do an exhaustive search rather than terminating
+				 * on an optimality condition...
+				 */
+				if (delta < bestdelta) {
+					bestdelta = delta;
+					bestclk = calcclkout;
+					bestpv->N1 = N1;
+					bestpv->M1 = M1;
+					bestpv->N2 = N2;
+					bestpv->M2 = M2;
+					bestpv->log2P = log2P;
+					if (delta == 0)	/* except this one */
+						return bestclk;
+				}
+			}
+		}
+	}
+
+	return bestclk;
+}
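+
+/*
+ * As in the single-stage case (illustrative), the output here is
+ * crystal * (N1 / M1) * (N2 / M2) >> log2P, with the first stage kept
+ * inside vco1's limits and the second inside vco2's.
+ */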
+
+int
+nouveau_calc_pll_mnp(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
+		     struct nouveau_pll_vals *pv)
+{
+	int outclk;
+
+	if (!pll_lim->vco2.maxfreq)
+		outclk = getMNP_single(dev, pll_lim, clk, pv);
+	else
+		outclk = getMNP_double(dev, pll_lim, clk, pv);
+
+	if (!outclk)
+		NV_ERROR(dev, "Could not find a compatible set of PLL values\n");
+
+	return outclk;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
new file mode 100644
index 0000000..9aaa972
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -0,0 +1,468 @@
+/*
+ * Copyright 2005-2006 Stephane Marchesin
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+
+static int
+nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_bo *pb = chan->pushbuf_bo;
+	struct nouveau_gpuobj *pushbuf = NULL;
+	uint32_t start = pb->bo.mem.mm_node->start << PAGE_SHIFT;
+	int ret;
+
+	if (pb->bo.mem.mem_type == TTM_PL_TT) {
+		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
+						  dev_priv->gart_info.aper_size,
+						  NV_DMA_ACCESS_RO, &pushbuf,
+						  NULL);
+		chan->pushbuf_base = start;
+	} else
+	if (dev_priv->card_type != NV_04) {
+		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
+					     dev_priv->fb_available_size,
+					     NV_DMA_ACCESS_RO,
+					     NV_DMA_TARGET_VIDMEM, &pushbuf);
+		chan->pushbuf_base = start;
+	} else {
+		/* NV04 cmdbuf hack, from original ddx.. not sure of its
+		 * exact reason for existing :)  PCI access to cmdbuf in
+		 * VRAM.
+		 */
+		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+					     drm_get_resource_start(dev, 1),
+					     dev_priv->fb_available_size,
+					     NV_DMA_ACCESS_RO,
+					     NV_DMA_TARGET_PCI, &pushbuf);
+		chan->pushbuf_base = start;
+	}
+
+	if (ret) {
+		NV_ERROR(dev, "Error creating pushbuf ctxdma: %d\n", ret);
+		return ret;
+	}
+
+	ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
+	if (ret) {
+		NV_ERROR(dev, "Error referencing pushbuf ctxdma: %d\n", ret);
+		if (pushbuf != dev_priv->gart_info.sg_ctxdma)
+			nouveau_gpuobj_del(dev, &pushbuf);
+		return ret;
+	}
+
+	return 0;
+}
+
+static struct nouveau_bo *
+nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
+{
+	struct nouveau_bo *pushbuf = NULL;
+	int location, ret;
+
+	if (nouveau_vram_pushbuf)
+		location = TTM_PL_FLAG_VRAM;
+	else
+		location = TTM_PL_FLAG_TT;
+
+	ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false,
+			     true, &pushbuf);
+	if (ret) {
+		NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
+		return NULL;
+	}
+
+	ret = nouveau_bo_pin(pushbuf, location);
+	if (ret) {
+		NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
+		nouveau_bo_ref(NULL, &pushbuf);
+		return NULL;
+	}
+
+	return pushbuf;
+}
+
+/* allocates and initializes a fifo for user space consumption */
+int
+nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
+		      struct drm_file *file_priv,
+		      uint32_t vram_handle, uint32_t tt_handle)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nouveau_channel *chan;
+	int channel, user;
+	int ret;
+
+	/*
+	 * Alright, here is the full story:
+	 * Nvidia cards have multiple hw fifo contexts (praise them for that,
+	 * no complicated crash-prone context switches).
+	 * We allocate a new context for each app and let it write to it
+	 * directly (woo, full userspace command submission!).
+	 * When there are no more contexts, you lose.
+	 */
+	for (channel = 0; channel < pfifo->channels; channel++) {
+		if (dev_priv->fifos[channel] == NULL)
+			break;
+	}
+
+	/* no more fifos. you lose. */
+	if (channel == pfifo->channels)
+		return -EINVAL;
+
+	dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel),
+					   GFP_KERNEL);
+	if (!dev_priv->fifos[channel])
+		return -ENOMEM;
+	dev_priv->fifo_alloc_count++;
+	chan = dev_priv->fifos[channel];
+	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
+	INIT_LIST_HEAD(&chan->fence.pending);
+	chan->dev = dev;
+	chan->id = channel;
+	chan->file_priv = file_priv;
+	chan->vram_handle = vram_handle;
+	chan->gart_handle = tt_handle;
+
+	NV_INFO(dev, "Allocating FIFO number %d\n", channel);
+
+	/* Allocate DMA push buffer */
+	chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
+	if (!chan->pushbuf_bo) {
+		ret = -ENOMEM;
+		NV_ERROR(dev, "pushbuf %d\n", ret);
+		nouveau_channel_free(chan);
+		return ret;
+	}
+
+	/* Locate channel's user control regs */
+	if (dev_priv->card_type < NV_40)
+		user = NV03_USER(channel);
+	else
+	if (dev_priv->card_type < NV_50)
+		user = NV40_USER(channel);
+	else
+		user = NV50_USER(channel);
+
+	chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user,
+								PAGE_SIZE);
+	if (!chan->user) {
+		NV_ERROR(dev, "ioremap of regs failed.\n");
+		nouveau_channel_free(chan);
+		return -ENOMEM;
+	}
+	chan->user_put = 0x40;
+	chan->user_get = 0x44;
+
+	/* Allocate space for per-channel fixed notifier memory */
+	ret = nouveau_notifier_init_channel(chan);
+	if (ret) {
+		NV_ERROR(dev, "ntfy %d\n", ret);
+		nouveau_channel_free(chan);
+		return ret;
+	}
+
+	/* Setup channel's default objects */
+	ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
+	if (ret) {
+		NV_ERROR(dev, "gpuobj %d\n", ret);
+		nouveau_channel_free(chan);
+		return ret;
+	}
+
+	/* Create a dma object for the push buffer */
+	ret = nouveau_channel_pushbuf_ctxdma_init(chan);
+	if (ret) {
+		NV_ERROR(dev, "pbctxdma %d\n", ret);
+		nouveau_channel_free(chan);
+		return ret;
+	}
+
+	/* disable the fifo caches */
+	pfifo->reassign(dev, false);
+
+	/* Create a graphics context for new channel */
+	ret = pgraph->create_context(chan);
+	if (ret) {
+		nouveau_channel_free(chan);
+		return ret;
+	}
+
+	/* Construct initial RAMFC for new channel */
+	ret = pfifo->create_context(chan);
+	if (ret) {
+		nouveau_channel_free(chan);
+		return ret;
+	}
+
+	pfifo->reassign(dev, true);
+
+	ret = nouveau_dma_init(chan);
+	if (!ret)
+		ret = nouveau_fence_init(chan);
+	if (ret) {
+		nouveau_channel_free(chan);
+		return ret;
+	}
+
+	nouveau_debugfs_channel_init(chan);
+
+	NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel);
+	*chan_ret = chan;
+	return 0;
+}
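The channel pool logic at the top of nouveau_channel_alloc() is plain first-free-slot allocation out of a fixed array, failing once every hardware context is taken. The same scheme in isolation (illustrative names and pool size, not the driver's):

	#include <errno.h>
	#include <stdlib.h>

	#define MAX_CHANNELS 32		/* stand-in for pfifo->channels */

	struct channel {
		int id;
	};

	static struct channel *slots[MAX_CHANNELS];

	static int
	channel_alloc(struct channel **out)
	{
		int i;

		/* first free slot wins */
		for (i = 0; i < MAX_CHANNELS; i++)
			if (!slots[i])
				break;

		/* no more contexts: you lose */
		if (i == MAX_CHANNELS)
			return -EINVAL;

		slots[i] = calloc(1, sizeof(*slots[i]));
		if (!slots[i])
			return -ENOMEM;

		slots[i]->id = i;
		*out = slots[i];
		return 0;
	}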
+
+int
+nouveau_channel_idle(struct nouveau_channel *chan)
+{
+	struct drm_device *dev;
+	struct drm_nouveau_private *dev_priv;
+	struct nouveau_engine *engine;
+	uint32_t caches;
+	int idle;
+
+	/* check before dereferencing chan */
+	if (!chan)
+		return 1;
+
+	dev = chan->dev;
+	dev_priv = dev->dev_private;
+	engine = &dev_priv->engine;
+
+	caches = nv_rd32(dev, NV03_PFIFO_CACHES);
+	nv_wr32(dev, NV03_PFIFO_CACHES, caches & ~1);
+
+	if (engine->fifo.channel_id(dev) != chan->id) {
+		struct nouveau_gpuobj *ramfc =
+			chan->ramfc ? chan->ramfc->gpuobj : NULL;
+
+		if (!ramfc) {
+			NV_ERROR(dev, "No RAMFC for channel %d\n", chan->id);
+			return 1;
+		}
+
+		engine->instmem.prepare_access(dev, false);
+		if (nv_ro32(dev, ramfc, 0) != nv_ro32(dev, ramfc, 1))
+			idle = 0;
+		else
+			idle = 1;
+		engine->instmem.finish_access(dev);
+	} else {
+		idle = (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET) ==
+			nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
+	}
+
+	nv_wr32(dev, NV03_PFIFO_CACHES, caches);
+	return idle;
+}
+
+/* stops a fifo */
+void
+nouveau_channel_free(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	unsigned long flags;
+	int ret;
+
+	NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id);
+
+	nouveau_debugfs_channel_fini(chan);
+
+	/* Give outstanding push buffers a chance to complete */
+	spin_lock_irqsave(&chan->fence.lock, flags);
+	nouveau_fence_update(chan);
+	spin_unlock_irqrestore(&chan->fence.lock, flags);
+	if (chan->fence.sequence != chan->fence.sequence_ack) {
+		struct nouveau_fence *fence = NULL;
+
+		ret = nouveau_fence_new(chan, &fence, true);
+		if (ret == 0) {
+			ret = nouveau_fence_wait(fence, NULL, false, false);
+			nouveau_fence_unref((void *)&fence);
+		}
+
+		if (ret)
+			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
+	}
+
+	/* Ensure all outstanding fences are signaled.  They should be if the
+	 * above attempts at idling were OK, but if we failed this'll tell TTM
+	 * we're done with the buffers.
+	 */
+	nouveau_fence_fini(chan);
+
+	/* Ensure the channel is no longer active on the GPU */
+	pfifo->reassign(dev, false);
+
+	if (pgraph->channel(dev) == chan) {
+		pgraph->fifo_access(dev, false);
+		pgraph->unload_context(dev);
+		pgraph->fifo_access(dev, true);
+	}
+	pgraph->destroy_context(chan);
+
+	if (pfifo->channel_id(dev) == chan->id) {
+		pfifo->disable(dev);
+		pfifo->unload_context(dev);
+		pfifo->enable(dev);
+	}
+	pfifo->destroy_context(chan);
+
+	pfifo->reassign(dev, true);
+
+	/* Release the channel's resources */
+	nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
+	if (chan->pushbuf_bo) {
+		nouveau_bo_unpin(chan->pushbuf_bo);
+		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
+	}
+	nouveau_gpuobj_channel_takedown(chan);
+	nouveau_notifier_takedown_channel(chan);
+	if (chan->user)
+		iounmap(chan->user);
+
+	dev_priv->fifos[chan->id] = NULL;
+	dev_priv->fifo_alloc_count--;
+	kfree(chan);
+}
+
+/* cleans up all the fifos from file_priv */
+void
+nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_engine *engine = &dev_priv->engine;
+	int i;
+
+	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
+	for (i = 0; i < engine->fifo.channels; i++) {
+		struct nouveau_channel *chan = dev_priv->fifos[i];
+
+		if (chan && chan->file_priv == file_priv)
+			nouveau_channel_free(chan);
+	}
+}
+
+int
+nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv,
+		      int channel)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_engine *engine = &dev_priv->engine;
+
+	if (channel >= engine->fifo.channels)
+		return 0;
+	if (dev_priv->fifos[channel] == NULL)
+		return 0;
+
+	return (dev_priv->fifos[channel]->file_priv == file_priv);
+}
+
+/***********************************
+ * ioctls wrapping the functions
+ ***********************************/
+
+static int
+nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_nouveau_channel_alloc *init = data;
+	struct nouveau_channel *chan;
+	int ret;
+
+	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+	if (dev_priv->engine.graph.accel_blocked)
+		return -ENODEV;
+
+	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
+		return -EINVAL;
+
+	ret = nouveau_channel_alloc(dev, &chan, file_priv,
+				    init->fb_ctxdma_handle,
+				    init->tt_ctxdma_handle);
+	if (ret)
+		return ret;
+	init->channel  = chan->id;
+
+	init->subchan[0].handle = NvM2MF;
+	if (dev_priv->card_type < NV_50)
+		init->subchan[0].grclass = 0x0039;
+	else
+		init->subchan[0].grclass = 0x5039;
+	init->nr_subchan = 1;
+
+	/* Named memory object area */
+	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
+				    &init->notifier_handle);
+	if (ret) {
+		nouveau_channel_free(chan);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_nouveau_channel_free *cfree = data;
+	struct nouveau_channel *chan;
+
+	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);
+
+	nouveau_channel_free(chan);
+	return 0;
+}
+
+/***********************************
+ * finally, the ioctl table
+ ***********************************/
+
+struct drm_ioctl_desc nouveau_ioctls[] = {
+	DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL, nouveau_gem_ioctl_pushbuf_call, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PIN, nouveau_gem_ioctl_pin, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_UNPIN, nouveau_gem_ioctl_unpin, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL2, nouveau_gem_ioctl_pushbuf_call2, DRM_AUTH),
+};
+
+int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
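For reference, the userspace side of the CHANNEL_ALLOC entry in this table would look roughly as follows with libdrm. This is a sketch only, assuming libdrm's drmCommandWriteRead() helper and the request struct from nouveau_drm.h, with error handling reduced to pass-through:

	#include <stdint.h>
	#include <xf86drm.h>
	#include "nouveau_drm.h"

	static int
	alloc_channel(int fd, uint32_t vram, uint32_t gart)
	{
		struct drm_nouveau_channel_alloc req = {
			.fb_ctxdma_handle = vram,
			.tt_ctxdma_handle = gart,
		};
		int ret;

		ret = drmCommandWriteRead(fd, DRM_NOUVEAU_CHANNEL_ALLOC,
					  &req, sizeof(req));
		if (ret)
			return ret;

		return req.channel;	/* FIFO id chosen by the kernel */
	}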
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
new file mode 100644
index 0000000..032cf09
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -0,0 +1,824 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_edid.h"
+#include "drm_crtc_helper.h"
+#include "nouveau_reg.h"
+#include "nouveau_drv.h"
+#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
+#include "nouveau_connector.h"
+#include "nouveau_hw.h"
+
+static inline struct drm_encoder_slave_funcs *
+get_slave_funcs(struct nouveau_encoder *enc)
+{
+	return to_encoder_slave(to_drm_encoder(enc))->slave_funcs;
+}
+
+static struct nouveau_encoder *
+find_encoder_by_type(struct drm_connector *connector, int type)
+{
+	struct drm_device *dev = connector->dev;
+	struct nouveau_encoder *nv_encoder;
+	struct drm_mode_object *obj;
+	int i, id;
+
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		id = connector->encoder_ids[i];
+		if (!id)
+			break;
+
+		obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			continue;
+		nv_encoder = nouveau_encoder(obj_to_encoder(obj));
+
+		if (type == OUTPUT_ANY || nv_encoder->dcb->type == type)
+			return nv_encoder;
+	}
+
+	return NULL;
+}
+
+struct nouveau_connector *
+nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
+{
+	struct drm_device *dev = to_drm_encoder(encoder)->dev;
+	struct drm_connector *drm_connector;
+
+	list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head) {
+		if (drm_connector->encoder == to_drm_encoder(encoder))
+			return nouveau_connector(drm_connector);
+	}
+
+	return NULL;
+}
+
+
+static void
+nouveau_connector_destroy(struct drm_connector *drm_connector)
+{
+	struct nouveau_connector *connector = nouveau_connector(drm_connector);
+
+	/* check before dereferencing; nouveau_connector() is container_of */
+	if (!drm_connector)
+		return;
+
+	NV_DEBUG(connector->base.dev, "\n");
+
+	drm_sysfs_connector_remove(drm_connector);
+	drm_connector_cleanup(drm_connector);
+	kfree(drm_connector);
+}
+
+static void
+nouveau_connector_ddc_prepare(struct drm_connector *connector, int *flags)
+{
+	struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+
+	if (dev_priv->card_type >= NV_50)
+		return;
+
+	*flags = 0;
+	if (NVLockVgaCrtcs(dev_priv->dev, false))
+		*flags |= 1;
+	if (nv_heads_tied(dev_priv->dev))
+		*flags |= 2;
+
+	if (*flags & 2)
+		NVSetOwner(dev_priv->dev, 0); /* necessary? */
+}
+
+static void
+nouveau_connector_ddc_finish(struct drm_connector *connector, int flags)
+{
+	struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+
+	if (dev_priv->card_type >= NV_50)
+		return;
+
+	if (flags & 2)
+		NVSetOwner(dev_priv->dev, 4);
+	if (flags & 1)
+		NVLockVgaCrtcs(dev_priv->dev, true);
+}
+
+static struct nouveau_i2c_chan *
+nouveau_connector_ddc_detect(struct drm_connector *connector,
+			     struct nouveau_encoder **pnv_encoder)
+{
+	struct drm_device *dev = connector->dev;
+	uint8_t out_buf[] = { 0x0, 0x0}, buf[2];
+	int ret, flags, i;
+
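+	/* Probe the EDID EEPROM address (0x50): writing a zero offset and
+	 * then reading a byte back successfully is taken as evidence that
+	 * a display is attached to this encoder's i2c bus.
+	 */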
+	struct i2c_msg msgs[] = {
+		{
+			.addr = 0x50,
+			.flags = 0,
+			.len = 1,
+			.buf = out_buf,
+		},
+		{
+			.addr = 0x50,
+			.flags = I2C_M_RD,
+			.len = 1,
+			.buf = buf,
+		}
+	};
+
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		struct nouveau_i2c_chan *i2c = NULL;
+		struct nouveau_encoder *nv_encoder;
+		struct drm_mode_object *obj;
+		int id;
+
+		id = connector->encoder_ids[i];
+		if (!id)
+			break;
+
+		obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			continue;
+		nv_encoder = nouveau_encoder(obj_to_encoder(obj));
+
+		if (nv_encoder->dcb->i2c_index < 0xf)
+			i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+		if (!i2c)
+			continue;
+
+		nouveau_connector_ddc_prepare(connector, &flags);
+		ret = i2c_transfer(&i2c->adapter, msgs, 2);
+		nouveau_connector_ddc_finish(connector, flags);
+
+		if (ret == 2) {
+			*pnv_encoder = nv_encoder;
+			return i2c;
+		}
+	}
+
+	return NULL;
+}
+
+static void
+nouveau_connector_set_encoder(struct drm_connector *connector,
+			      struct nouveau_encoder *nv_encoder)
+{
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+	struct drm_device *dev = connector->dev;
+
+	if (nv_connector->detected_encoder == nv_encoder)
+		return;
+	nv_connector->detected_encoder = nv_encoder;
+
+	if (nv_encoder->dcb->type == OUTPUT_LVDS ||
+	    nv_encoder->dcb->type == OUTPUT_TMDS) {
+		connector->doublescan_allowed = false;
+		connector->interlace_allowed = false;
+	} else {
+		connector->doublescan_allowed = true;
+		if (dev_priv->card_type == NV_20 ||
+		   (dev_priv->card_type == NV_10 &&
+		    (dev->pci_device & 0x0ff0) != 0x0100 &&
+		    (dev->pci_device & 0x0ff0) != 0x0150))
+			/* HW is broken */
+			connector->interlace_allowed = false;
+		else
+			connector->interlace_allowed = true;
+	}
+
+	if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
+		drm_connector_property_set_value(connector,
+			dev->mode_config.dvi_i_subconnector_property,
+			nv_encoder->dcb->type == OUTPUT_TMDS ?
+			DRM_MODE_SUBCONNECTOR_DVID :
+			DRM_MODE_SUBCONNECTOR_DVIA);
+	}
+}
+
+static enum drm_connector_status
+nouveau_connector_detect(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct nouveau_encoder *nv_encoder = NULL;
+	struct nouveau_i2c_chan *i2c;
+	int type, flags;
+
+	if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+		nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
+	if (nv_encoder && nv_connector->native_mode) {
+		nouveau_connector_set_encoder(connector, nv_encoder);
+		return connector_status_connected;
+	}
+
+	i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
+	if (i2c) {
+		nouveau_connector_ddc_prepare(connector, &flags);
+		nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
+		nouveau_connector_ddc_finish(connector, flags);
+		drm_mode_connector_update_edid_property(connector,
+							nv_connector->edid);
+		if (!nv_connector->edid) {
+			NV_ERROR(dev, "DDC responded, but no EDID for %s\n",
+				 drm_get_connector_name(connector));
+			return connector_status_disconnected;
+		}
+
+		if (nv_encoder->dcb->type == OUTPUT_DP &&
+		    !nouveau_dp_detect(to_drm_encoder(nv_encoder))) {
+			NV_ERROR(dev, "Detected %s, but failed init\n",
+				 drm_get_connector_name(connector));
+			return connector_status_disconnected;
+		}
+
+		/* Override the encoder type for DVI-I based on whether the
+		 * EDID says the display is digital or analog; both use the
+		 * same i2c channel, so the value returned from ddc_detect
+		 * isn't necessarily correct.
+		 */
+		if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
+			if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL)
+				type = OUTPUT_TMDS;
+			else
+				type = OUTPUT_ANALOG;
+
+			nv_encoder = find_encoder_by_type(connector, type);
+			if (!nv_encoder) {
+				NV_ERROR(dev, "Detected %d encoder on %s, "
+					      "but no object!\n", type,
+					 drm_get_connector_name(connector));
+				return connector_status_disconnected;
+			}
+		}
+
+		nouveau_connector_set_encoder(connector, nv_encoder);
+		return connector_status_connected;
+	}
+
+	nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
+	if (!nv_encoder)
+		nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
+	if (nv_encoder) {
+		struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
+		struct drm_encoder_helper_funcs *helper =
+						encoder->helper_private;
+
+		if (helper->detect(encoder, connector) ==
+						connector_status_connected) {
+			nouveau_connector_set_encoder(connector, nv_encoder);
+			return connector_status_connected;
+		}
+
+	}
+
+	return connector_status_disconnected;
+}
+
+static void
+nouveau_connector_force(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct nouveau_encoder *nv_encoder;
+	int type;
+
+	if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
+		if (connector->force == DRM_FORCE_ON_DIGITAL)
+			type = OUTPUT_TMDS;
+		else
+			type = OUTPUT_ANALOG;
+	} else
+		type = OUTPUT_ANY;
+
+	nv_encoder = find_encoder_by_type(connector, type);
+	if (!nv_encoder) {
+		NV_ERROR(dev, "can't find encoder to force %s on!\n",
+			 drm_get_connector_name(connector));
+		connector->status = connector_status_disconnected;
+		return;
+	}
+
+	nouveau_connector_set_encoder(connector, nv_encoder);
+}
+
+static int
+nouveau_connector_set_property(struct drm_connector *connector,
+			       struct drm_property *property, uint64_t value)
+{
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+	struct drm_device *dev = connector->dev;
+	int ret;
+
+	/* Scaling mode */
+	if (property == dev->mode_config.scaling_mode_property) {
+		struct nouveau_crtc *nv_crtc = NULL;
+		bool modeset = false;
+
+		switch (value) {
+		case DRM_MODE_SCALE_NONE:
+		case DRM_MODE_SCALE_FULLSCREEN:
+		case DRM_MODE_SCALE_CENTER:
+		case DRM_MODE_SCALE_ASPECT:
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		/* LVDS always needs gpu scaling */
+		if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS &&
+		    value == DRM_MODE_SCALE_NONE)
+			return -EINVAL;
+
+		/* Changing between GPU and panel scaling requires a full
+		 * modeset
+		 */
+		if ((nv_connector->scaling_mode == DRM_MODE_SCALE_NONE) ||
+		    (value == DRM_MODE_SCALE_NONE))
+			modeset = true;
+		nv_connector->scaling_mode = value;
+
+		if (connector->encoder && connector->encoder->crtc)
+			nv_crtc = nouveau_crtc(connector->encoder->crtc);
+		if (!nv_crtc)
+			return 0;
+
+		if (modeset || !nv_crtc->set_scale) {
+			ret = drm_crtc_helper_set_mode(&nv_crtc->base,
+							&nv_crtc->base.mode,
+							nv_crtc->base.x,
+							nv_crtc->base.y, NULL);
+			if (!ret)
+				return -EINVAL;
+		} else {
+			ret = nv_crtc->set_scale(nv_crtc, value, true);
+			if (ret)
+				return ret;
+		}
+
+		return 0;
+	}
+
+	/* Dithering */
+	if (property == dev->mode_config.dithering_mode_property) {
+		struct nouveau_crtc *nv_crtc = NULL;
+
+		if (value == DRM_MODE_DITHERING_ON)
+			nv_connector->use_dithering = true;
+		else
+			nv_connector->use_dithering = false;
+
+		if (connector->encoder && connector->encoder->crtc)
+			nv_crtc = nouveau_crtc(connector->encoder->crtc);
+
+		if (!nv_crtc || !nv_crtc->set_dither)
+			return 0;
+
+		return nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering,
+					   true);
+	}
+
+	if (nv_encoder && nv_encoder->dcb->type == OUTPUT_TV)
+		return get_slave_funcs(nv_encoder)->
+			set_property(to_drm_encoder(nv_encoder), connector, property, value);
+
+	return -EINVAL;
+}
+
+static struct drm_display_mode *
+nouveau_connector_native_mode(struct nouveau_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_display_mode *mode, *largest = NULL;
+	int high_w = 0, high_h = 0, high_v = 0;
+
+	/* Use preferred mode if there is one.. */
+	list_for_each_entry(mode, &connector->base.probed_modes, head) {
+		if (mode->type & DRM_MODE_TYPE_PREFERRED) {
+			NV_DEBUG(dev, "native mode from preferred\n");
+			return drm_mode_duplicate(dev, mode);
+		}
+	}
+
+	/* Otherwise, take the resolution with the largest width, then height,
+	 * then vertical refresh
+	 */
+	list_for_each_entry(mode, &connector->base.probed_modes, head) {
+		if (mode->hdisplay < high_w)
+			continue;
+
+		if (mode->hdisplay == high_w && mode->vdisplay < high_h)
+			continue;
+
+		if (mode->hdisplay == high_w && mode->vdisplay == high_h &&
+		    mode->vrefresh < high_v)
+			continue;
+
+		high_w = mode->hdisplay;
+		high_h = mode->vdisplay;
+		high_v = mode->vrefresh;
+		largest = mode;
+	}
+
+	NV_DEBUG(dev, "native mode from largest: %dx%d@%d\n",
+		      high_w, high_h, high_v);
+	return largest ? drm_mode_duplicate(dev, largest) : NULL;
+}
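The fallback above is a lexicographic maximum over (hdisplay, vdisplay, vrefresh). An equivalent comparator, using a stand-in struct for illustration:

	struct modeinfo {
		int hdisplay, vdisplay, vrefresh;
	};

	/* >0 if a is "larger" than b in the sense used above */
	static int
	mode_cmp(const struct modeinfo *a, const struct modeinfo *b)
	{
		if (a->hdisplay != b->hdisplay)
			return a->hdisplay - b->hdisplay;
		if (a->vdisplay != b->vdisplay)
			return a->vdisplay - b->vdisplay;
		return a->vrefresh - b->vrefresh;
	}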
+
+struct moderec {
+	int hdisplay;
+	int vdisplay;
+};
+
+static struct moderec scaler_modes[] = {
+	{ 1920, 1200 },
+	{ 1920, 1080 },
+	{ 1680, 1050 },
+	{ 1600, 1200 },
+	{ 1400, 1050 },
+	{ 1280, 1024 },
+	{ 1280, 960 },
+	{ 1152, 864 },
+	{ 1024, 768 },
+	{ 800, 600 },
+	{ 720, 400 },
+	{ 640, 480 },
+	{ 640, 400 },
+	{ 640, 350 },
+	{}
+};
+
+static int
+nouveau_connector_scaler_modes_add(struct drm_connector *connector)
+{
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct drm_display_mode *native = nv_connector->native_mode, *m;
+	struct drm_device *dev = connector->dev;
+	struct moderec *mode = &scaler_modes[0];
+	int modes = 0;
+
+	if (!native)
+		return 0;
+
+	while (mode->hdisplay) {
+		if (mode->hdisplay <= native->hdisplay &&
+		    mode->vdisplay <= native->vdisplay) {
+			m = drm_cvt_mode(dev, mode->hdisplay, mode->vdisplay,
+					 drm_mode_vrefresh(native), false,
+					 false, false);
+			if (!m) {
+				/* don't loop forever if the alloc fails */
+				mode++;
+				continue;
+			}
+
+			m->type |= DRM_MODE_TYPE_DRIVER;
+
+			drm_mode_probed_add(connector, m);
+			modes++;
+		}
+
+		mode++;
+	}
+
+	return modes;
+}
+
+static int
+nouveau_connector_get_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+	int ret = 0;
+
+	/* If we're not LVDS, destroy the previous native mode, as the
+	 * attached monitor could have changed.
+	 */
+	if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
+	    nv_connector->native_mode) {
+		drm_mode_destroy(dev, nv_connector->native_mode);
+		nv_connector->native_mode = NULL;
+	}
+
+	if (nv_connector->edid)
+		ret = drm_add_edid_modes(connector, nv_connector->edid);
+
+	/* Find the native mode if this is a digital panel.  If we didn't
+	 * find any modes through DDC previously, add the native mode to
+	 * the list of modes.
+	 */
+	if (!nv_connector->native_mode)
+		nv_connector->native_mode =
+			nouveau_connector_native_mode(nv_connector);
+	if (ret == 0 && nv_connector->native_mode) {
+		struct drm_display_mode *mode;
+
+		mode = drm_mode_duplicate(dev, nv_connector->native_mode);
+		drm_mode_probed_add(connector, mode);
+		ret = 1;
+	}
+
+	if (nv_encoder->dcb->type == OUTPUT_TV)
+		ret = get_slave_funcs(nv_encoder)->
+			get_modes(to_drm_encoder(nv_encoder), connector);
+
+	if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+		ret += nouveau_connector_scaler_modes_add(connector);
+
+	return ret;
+}
+
+static int
+nouveau_connector_mode_valid(struct drm_connector *connector,
+			     struct drm_display_mode *mode)
+{
+	struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+	unsigned min_clock = 25000, max_clock = min_clock;
+	unsigned clock = mode->clock;
+
+	switch (nv_encoder->dcb->type) {
+	case OUTPUT_LVDS:
+		BUG_ON(!nv_connector->native_mode);
+		if (mode->hdisplay > nv_connector->native_mode->hdisplay ||
+		    mode->vdisplay > nv_connector->native_mode->vdisplay)
+			return MODE_PANEL;
+
+		min_clock = 0;
+		max_clock = 400000;
+		break;
+	case OUTPUT_TMDS:
+		if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) ||
+		    (dev_priv->card_type < NV_50 &&
+		     !nv_encoder->dcb->duallink_possible))
+			max_clock = 165000;
+		else
+			max_clock = 330000;
+		break;
+	case OUTPUT_ANALOG:
+		max_clock = nv_encoder->dcb->crtconf.maxfreq;
+		if (!max_clock)
+			max_clock = 350000;
+		break;
+	case OUTPUT_TV:
+		return get_slave_funcs(nv_encoder)->
+			mode_valid(to_drm_encoder(nv_encoder), mode);
+	case OUTPUT_DP:
+		if (nv_encoder->dp.link_bw == DP_LINK_BW_2_7)
+			max_clock = nv_encoder->dp.link_nr * 270000;
+		else
+			max_clock = nv_encoder->dp.link_nr * 162000;
+
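+		/* compare bandwidth rather than pixel clock: 3 bytes per
+		 * pixel (assuming 24bpp) against link_nr lanes carrying
+		 * 162/270 MB/s each after 8b/10b coding
+		 */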
+		clock *= 3;
+		break;
+	}
+
+	if (clock < min_clock)
+		return MODE_CLOCK_LOW;
+
+	if (clock > max_clock)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static struct drm_encoder *
+nouveau_connector_best_encoder(struct drm_connector *connector)
+{
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+
+	if (nv_connector->detected_encoder)
+		return to_drm_encoder(nv_connector->detected_encoder);
+
+	return NULL;
+}
+
+static const struct drm_connector_helper_funcs
+nouveau_connector_helper_funcs = {
+	.get_modes = nouveau_connector_get_modes,
+	.mode_valid = nouveau_connector_mode_valid,
+	.best_encoder = nouveau_connector_best_encoder,
+};
+
+static const struct drm_connector_funcs
+nouveau_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.save = NULL,
+	.restore = NULL,
+	.detect = nouveau_connector_detect,
+	.destroy = nouveau_connector_destroy,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = nouveau_connector_set_property,
+	.force = nouveau_connector_force
+};
+
+static int
+nouveau_connector_create_lvds(struct drm_device *dev,
+			      struct drm_connector *connector)
+{
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_i2c_chan *i2c = NULL;
+	struct nouveau_encoder *nv_encoder;
+	struct drm_display_mode native, *mode, *temp;
+	bool dummy, if_is_24bit = false;
+	int ret, flags;
+
+	nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
+	if (!nv_encoder)
+		return -ENODEV;
+
+	ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &if_is_24bit);
+	if (ret) {
+		NV_ERROR(dev, "Error parsing LVDS table, disabling LVDS\n");
+		return ret;
+	}
+	nv_connector->use_dithering = !if_is_24bit;
+
+	/* First, try getting EDID over DDC, if allowed and an I2C channel
+	 * is available.
+	 */
+	if (!dev_priv->VBIOS.pub.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf)
+		i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+
+	if (i2c) {
+		nouveau_connector_ddc_prepare(connector, &flags);
+		nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
+		nouveau_connector_ddc_finish(connector, flags);
+	}
+
+	/* If no EDID was found above, and the VBIOS indicates a hardcoded
+	 * modeline is available for the panel, set it as the panel's
+	 * native mode and exit.
+	 */
+	if (!nv_connector->edid && nouveau_bios_fp_mode(dev, &native) &&
+	     (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
+	      dev_priv->VBIOS.pub.fp_no_ddc)) {
+		nv_connector->native_mode = drm_mode_duplicate(dev, &native);
+		goto out;
+	}
+
+	/* Still nothing; some VBIOS images have a hardcoded EDID block
+	 * for the panel stored in them.
+	 */
+	if (!nv_connector->edid && !nv_connector->native_mode &&
+	    !dev_priv->VBIOS.pub.fp_no_ddc) {
+		nv_connector->edid =
+			(struct edid *)nouveau_bios_embedded_edid(dev);
+	}
+
+	if (!nv_connector->edid)
+		goto out;
+
+	/* We didn't find/use a panel mode from the VBIOS, so parse the EDID
+	 * block and look for the preferred mode there.
+	 */
+	ret = drm_add_edid_modes(connector, nv_connector->edid);
+	if (ret == 0)
+		goto out;
+	nv_connector->detected_encoder = nv_encoder;
+	nv_connector->native_mode = nouveau_connector_native_mode(nv_connector);
+	list_for_each_entry_safe(mode, temp, &connector->probed_modes, head)
+		drm_mode_remove(connector, mode);
+
+out:
+	if (!nv_connector->native_mode) {
+		NV_ERROR(dev, "LVDS present in DCB table, but couldn't "
+			      "determine its native mode.  Disabling.\n");
+		return -ENODEV;
+	}
+
+	drm_mode_connector_update_edid_property(connector, nv_connector->edid);
+	return 0;
+}
+
+int
+nouveau_connector_create(struct drm_device *dev, int index, int type)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_connector *nv_connector = NULL;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	int ret;
+
+	NV_DEBUG(dev, "\n");
+
+	nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
+	if (!nv_connector)
+		return -ENOMEM;
+	nv_connector->dcb = nouveau_bios_connector_entry(dev, index);
+	connector = &nv_connector->base;
+
+	switch (type) {
+	case DRM_MODE_CONNECTOR_VGA:
+		NV_INFO(dev, "Detected a VGA connector\n");
+		break;
+	case DRM_MODE_CONNECTOR_DVID:
+		NV_INFO(dev, "Detected a DVI-D connector\n");
+		break;
+	case DRM_MODE_CONNECTOR_DVII:
+		NV_INFO(dev, "Detected a DVI-I connector\n");
+		break;
+	case DRM_MODE_CONNECTOR_LVDS:
+		NV_INFO(dev, "Detected an LVDS connector\n");
+		break;
+	case DRM_MODE_CONNECTOR_TV:
+		NV_INFO(dev, "Detected a TV connector\n");
+		break;
+	case DRM_MODE_CONNECTOR_DisplayPort:
+		NV_INFO(dev, "Detected a DisplayPort connector\n");
+		break;
+	default:
+		NV_ERROR(dev, "Unknown connector, this is not good.\n");
+		break;
+	}
+
+	/* defaults, will get overridden in detect() */
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+
+	drm_connector_init(dev, connector, &nouveau_connector_funcs, type);
+	drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
+
+	/* Init DVI-I specific properties */
+	if (type == DRM_MODE_CONNECTOR_DVII) {
+		drm_mode_create_dvi_i_properties(dev);
+		drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
+		drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0);
+	}
+
+	if (type != DRM_MODE_CONNECTOR_LVDS)
+		nv_connector->use_dithering = false;
+
+	if (type == DRM_MODE_CONNECTOR_DVID ||
+	    type == DRM_MODE_CONNECTOR_DVII ||
+	    type == DRM_MODE_CONNECTOR_LVDS ||
+	    type == DRM_MODE_CONNECTOR_DisplayPort) {
+		nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
+
+		drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property,
+					      nv_connector->scaling_mode);
+		drm_connector_attach_property(connector, dev->mode_config.dithering_mode_property,
+					      nv_connector->use_dithering ? DRM_MODE_DITHERING_ON
+					      : DRM_MODE_DITHERING_OFF);
+
+	} else {
+		nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
+
+		if (type == DRM_MODE_CONNECTOR_VGA  &&
+				dev_priv->card_type >= NV_50) {
+			drm_connector_attach_property(connector,
+					dev->mode_config.scaling_mode_property,
+					nv_connector->scaling_mode);
+		}
+	}
+
+	/* attach encoders */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+		if (nv_encoder->dcb->connector != index)
+			continue;
+
+		if (get_slave_funcs(nv_encoder))
+			get_slave_funcs(nv_encoder)->create_resources(encoder, connector);
+
+		drm_mode_connector_attach_encoder(connector, encoder);
+	}
+
+	drm_sysfs_connector_add(connector);
+
+	if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+		ret = nouveau_connector_create_lvds(dev, connector);
+		if (ret) {
+			connector->funcs->destroy(connector);
+			return ret;
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
new file mode 100644
index 0000000..728b809
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_CONNECTOR_H__
+#define __NOUVEAU_CONNECTOR_H__
+
+#include "drm_edid.h"
+#include "nouveau_i2c.h"
+
+struct nouveau_connector {
+	struct drm_connector base;
+
+	struct dcb_connector_table_entry *dcb;
+
+	int scaling_mode;
+	bool use_dithering;
+
+	struct nouveau_encoder *detected_encoder;
+	struct edid *edid;
+	struct drm_display_mode *native_mode;
+};
+
+static inline struct nouveau_connector *nouveau_connector(
+						struct drm_connector *con)
+{
+	return container_of(con, struct nouveau_connector, base);
+}
+
+int nouveau_connector_create(struct drm_device *dev, int i2c_index, int type);
+
+#endif /* __NOUVEAU_CONNECTOR_H__ */
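The nouveau_connector() helper above is the usual container_of upcast: given a pointer to the embedded drm_connector, it recovers the wrapping nouveau_connector. A self-contained illustration of the pattern (simplified macro; the kernel's container_of also type-checks the member):

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct base {
		int x;
	};

	struct wrapper {
		struct base base;	/* embedded, like drm_connector */
		int extra;
	};

	static struct wrapper *
	to_wrapper(struct base *b)
	{
		return container_of(b, struct wrapper, base);
	}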
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
new file mode 100644
index 0000000..49fa7b2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_CRTC_H__
+#define __NOUVEAU_CRTC_H__
+
+struct nouveau_crtc {
+	struct drm_crtc base;
+
+	int index;
+
+	struct drm_display_mode *mode;
+
+	uint32_t dpms_saved_fp_control;
+	uint32_t fp_users;
+	int saturation;
+	int sharpness;
+	int last_dpms;
+
+	struct {
+		int cpp;
+		bool blanked;
+		uint32_t offset;
+		uint32_t tile_flags;
+	} fb;
+
+	struct {
+		struct nouveau_bo *nvbo;
+		bool visible;
+		uint32_t offset;
+		void (*set_offset)(struct nouveau_crtc *, uint32_t offset);
+		void (*set_pos)(struct nouveau_crtc *, int x, int y);
+		void (*hide)(struct nouveau_crtc *, bool update);
+		void (*show)(struct nouveau_crtc *, bool update);
+	} cursor;
+
+	struct {
+		struct nouveau_bo *nvbo;
+		uint16_t r[256];
+		uint16_t g[256];
+		uint16_t b[256];
+		int depth;
+	} lut;
+
+	int (*set_dither)(struct nouveau_crtc *crtc, bool on, bool update);
+	int (*set_scale)(struct nouveau_crtc *crtc, int mode, bool update);
+};
+
+static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc)
+{
+	return container_of(crtc, struct nouveau_crtc, base);
+}
+
+static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc)
+{
+	return &crtc->base;
+}
+
+int nv50_crtc_create(struct drm_device *dev, int index);
+int nv50_cursor_init(struct nouveau_crtc *);
+void nv50_cursor_fini(struct nouveau_crtc *);
+int nv50_crtc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file_priv,
+			 uint32_t buffer_handle, uint32_t width,
+			 uint32_t height);
+int nv50_crtc_cursor_move(struct drm_crtc *drm_crtc, int x, int y);
+
+int nv04_cursor_init(struct nouveau_crtc *);
+
+struct nouveau_connector *
+nouveau_crtc_connector_get(struct nouveau_crtc *crtc);
+
+#endif /* __NOUVEAU_CRTC_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
new file mode 100644
index 0000000..d79db36
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2009 Red Hat <bskeggs@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ *  Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <linux/debugfs.h>
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+
+static int
+nouveau_debugfs_channel_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct nouveau_channel *chan = node->info_ent->data;
+
+	seq_printf(m, "channel id    : %d\n", chan->id);
+
+	seq_printf(m, "cpu fifo state:\n");
+	seq_printf(m, "          base: 0x%08x\n", chan->pushbuf_base);
+	seq_printf(m, "           max: 0x%08x\n", chan->dma.max << 2);
+	seq_printf(m, "           cur: 0x%08x\n", chan->dma.cur << 2);
+	seq_printf(m, "           put: 0x%08x\n", chan->dma.put << 2);
+	seq_printf(m, "          free: 0x%08x\n", chan->dma.free << 2);
+
+	seq_printf(m, "gpu fifo state:\n");
+	seq_printf(m, "           get: 0x%08x\n",
+					nvchan_rd32(chan, chan->user_get));
+	seq_printf(m, "           put: 0x%08x\n",
+					nvchan_rd32(chan, chan->user_put));
+
+	seq_printf(m, "last fence    : %d\n", chan->fence.sequence);
+	seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack);
+	return 0;
+}
+
+int
+nouveau_debugfs_channel_init(struct nouveau_channel *chan)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct drm_minor *minor = chan->dev->primary;
+	int ret;
+
+	if (!dev_priv->debugfs.channel_root) {
+		dev_priv->debugfs.channel_root =
+			debugfs_create_dir("channel", minor->debugfs_root);
+		if (!dev_priv->debugfs.channel_root)
+			return -ENOENT;
+	}
+
+	snprintf(chan->debugfs.name, 32, "%d", chan->id);
+	chan->debugfs.info.name = chan->debugfs.name;
+	chan->debugfs.info.show = nouveau_debugfs_channel_info;
+	chan->debugfs.info.driver_features = 0;
+	chan->debugfs.info.data = chan;
+
+	ret = drm_debugfs_create_files(&chan->debugfs.info, 1,
+				       dev_priv->debugfs.channel_root,
+				       chan->dev->primary);
+	if (ret == 0)
+		chan->debugfs.active = true;
+	return ret;
+}
+
+void
+nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+
+	if (!chan->debugfs.active)
+		return;
+
+	drm_debugfs_remove_files(&chan->debugfs.info, 1, chan->dev->primary);
+	chan->debugfs.active = false;
+
+	if (chan == dev_priv->channel) {
+		debugfs_remove(dev_priv->debugfs.channel_root);
+		dev_priv->debugfs.channel_root = NULL;
+	}
+}
+
+static int
+nouveau_debugfs_chipset_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_minor *minor = node->minor;
+	struct drm_device *dev = minor->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t ppci_0;
+
+	ppci_0 = nv_rd32(dev, dev_priv->chipset >= 0x40 ? 0x88000 : 0x1800);
+
+	seq_printf(m, "PMC_BOOT_0: 0x%08x\n", nv_rd32(dev, NV03_PMC_BOOT_0));
+	seq_printf(m, "PCI ID    : 0x%04x:0x%04x\n",
+		   ppci_0 & 0xffff, ppci_0 >> 16);
+	return 0;
+}
+
+static int
+nouveau_debugfs_memory_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_minor *minor = node->minor;
+	struct drm_device *dev = minor->dev;
+
+	seq_printf(m, "VRAM total: %dKiB\n",
+		   (int)(nouveau_mem_fb_amount(dev) >> 10));
+	return 0;
+}
+
+static struct drm_info_list nouveau_debugfs_list[] = {
+	{ "chipset", nouveau_debugfs_chipset_info, 0, NULL },
+	{ "memory", nouveau_debugfs_memory_info, 0, NULL },
+};
+#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
+
+int
+nouveau_debugfs_init(struct drm_minor *minor)
+{
+	drm_debugfs_create_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
+				 minor->debugfs_root, minor);
+	return 0;
+}
+
+void
+nouveau_debugfs_takedown(struct drm_minor *minor)
+{
+	drm_debugfs_remove_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
+				 minor);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
new file mode 100644
index 0000000..dfc9439
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+#include "nouveau_drv.h"
+#include "nouveau_fb.h"
+#include "nouveau_fbcon.h"
+
+static void
+nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
+{
+	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+	struct drm_device *dev = drm_fb->dev;
+
+	if (drm_fb->fbdev)
+		nouveau_fbcon_remove(dev, drm_fb);
+
+	if (fb->nvbo) {
+		mutex_lock(&dev->struct_mutex);
+		drm_gem_object_unreference(fb->nvbo->gem);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	drm_framebuffer_cleanup(drm_fb);
+	kfree(fb);
+}
+
+static int
+nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb,
+				       struct drm_file *file_priv,
+				       unsigned int *handle)
+{
+	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+
+	return drm_gem_handle_create(file_priv, fb->nvbo->gem, handle);
+}
+
+static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
+	.destroy = nouveau_user_framebuffer_destroy,
+	.create_handle = nouveau_user_framebuffer_create_handle,
+};
+
+struct drm_framebuffer *
+nouveau_framebuffer_create(struct drm_device *dev, struct nouveau_bo *nvbo,
+			   struct drm_mode_fb_cmd *mode_cmd)
+{
+	struct nouveau_framebuffer *fb;
+	int ret;
+
+	fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
+	if (!fb)
+		return NULL;
+
+	ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
+	if (ret) {
+		kfree(fb);
+		return NULL;
+	}
+
+	drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
+
+	fb->nvbo = nvbo;
+	return &fb->base;
+}
+
+static struct drm_framebuffer *
+nouveau_user_framebuffer_create(struct drm_device *dev,
+				struct drm_file *file_priv,
+				struct drm_mode_fb_cmd *mode_cmd)
+{
+	struct drm_framebuffer *fb;
+	struct drm_gem_object *gem;
+
+	gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
+	if (!gem)
+		return NULL;
+
+	fb = nouveau_framebuffer_create(dev, nouveau_gem_object(gem), mode_cmd);
+	if (!fb) {
+		drm_gem_object_unreference(gem);
+		return NULL;
+	}
+
+	return fb;
+}
+
+const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
+	.fb_create = nouveau_user_framebuffer_create,
+	.fb_changed = nouveau_fbcon_probe,
+};
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
new file mode 100644
index 0000000..7035536
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+
+int
+nouveau_dma_init(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *m2mf = NULL;
+	int ret, i;
+
+	/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
+	ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ?
+				    0x0039 : 0x5039, &m2mf);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_ref_add(dev, chan, NvM2MF, m2mf, NULL);
+	if (ret)
+		return ret;
+
+	/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
+	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
+	if (ret)
+		return ret;
+
+	/* Map push buffer */
+	ret = nouveau_bo_map(chan->pushbuf_bo);
+	if (ret)
+		return ret;
+
+	/* Map M2MF notifier object - fbcon. */
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		ret = nouveau_bo_map(chan->notifier_bo);
+		if (ret)
+			return ret;
+	}
+
+	/* Initialise DMA vars */
+	chan->dma.max  = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
+	chan->dma.put  = 0;
+	chan->dma.cur  = chan->dma.put;
+	chan->dma.free = chan->dma.max - chan->dma.cur;
+
+	/* Insert NOPS for NOUVEAU_DMA_SKIPS */
+	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+		OUT_RING(chan, 0);
+
+	/* Initialise NV_MEMORY_TO_MEMORY_FORMAT */
+	ret = RING_SPACE(chan, 4);
+	if (ret)
+		return ret;
+	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
+	OUT_RING(chan, NvM2MF);
+	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
+	OUT_RING(chan, NvNotify0);
+
+	/* Sit back and pray the channel works.. */
+	FIRE_RING(chan);
+
+	return 0;
+}
+
+void
+OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
+{
+	bool is_iomem;
+	u32 *mem = ttm_kmap_obj_virtual(&chan->pushbuf_bo->kmap, &is_iomem);
+	mem = &mem[chan->dma.cur];
+	if (is_iomem)
+		memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
+	else
+		memcpy(mem, data, nr_dwords * 4);
+	chan->dma.cur += nr_dwords;
+}
+
+static inline bool
+READ_GET(struct nouveau_channel *chan, uint32_t *get)
+{
+	uint32_t val;
+
+	val = nvchan_rd32(chan, chan->user_get);
+	if (val < chan->pushbuf_base ||
+	    val >= chan->pushbuf_base + chan->pushbuf_bo->bo.mem.size) {
+		/* meaningless to dma_wait() except to know whether the
+		 * GPU has stalled or not
+		 */
+		*get = val;
+		return false;
+	}
+
+	*get = (val - chan->pushbuf_base) >> 2;
+	return true;
+}
+
+int
+nouveau_dma_wait(struct nouveau_channel *chan, int size)
+{
+	uint32_t get, prev_get = 0, cnt = 0;
+	bool get_valid;
+
+	while (chan->dma.free < size) {
+		/* reset the counter as long as GET is still advancing; this
+		 * avoids misdetecting a GPU lockup if the GPU happens to
+		 * just be processing an operation that takes a long time
+		 */
+		get_valid = READ_GET(chan, &get);
+		if (get != prev_get) {
+			prev_get = get;
+			cnt = 0;
+		}
+
+		if ((++cnt & 0xff) == 0) {
+			DRM_UDELAY(1);
+			if (cnt > 100000)
+				return -EBUSY;
+		}
+
+		/* loop until we have a usable GET pointer.  the value
+		 * we read from the GPU may be outside the main ring if
+		 * PFIFO is processing a buffer called from the main ring;
+		 * discard these values until something sensible is seen.
+		 *
+		 * the other case where we discard GET is while the GPU is
+		 * fetching from the SKIPS area, so the code below doesn't
+		 * have to deal with some fun corner cases.
+		 */
+		if (!get_valid || get < NOUVEAU_DMA_SKIPS)
+			continue;
+
+		if (get <= chan->dma.cur) {
+			/* engine is fetching behind us, or is completely
+			 * idle (GET == PUT) so we have free space up until
+			 * the end of the push buffer
+			 *
+			 * we can only hit that path once per call due to
+			 * looping back to the beginning of the push buffer,
+			 * we'll hit the fetching-ahead-of-us path from that
+			 * point on.
+			 *
+			 * the *one* exception to that rule is if we read
+			 * GET==PUT, in which case the below conditional will
+			 * always succeed and break us out of the wait loop.
+			 */
+			chan->dma.free = chan->dma.max - chan->dma.cur;
+			if (chan->dma.free >= size)
+				break;
+
+			/* not enough space left at the end of the push buffer,
+			 * instruct the GPU to jump back to the start right
+			 * after processing the currently pending commands.
+			 */
+			OUT_RING(chan, chan->pushbuf_base | 0x20000000);
+			WRITE_PUT(NOUVEAU_DMA_SKIPS);
+
+			/* we're now submitting commands at the start of
+			 * the push buffer.
+			 */
+			chan->dma.cur  =
+			chan->dma.put  = NOUVEAU_DMA_SKIPS;
+		}
+
+		/* engine fetching ahead of us, we have space up until the
+		 * current GET pointer.  the "- 1" is to ensure there's
+		 * space left to emit a jump back to the beginning of the
+		 * push buffer if we require it.  we can never get GET == PUT
+		 * here, so this is safe.
+		 */
+		chan->dma.free = get - chan->dma.cur - 1;
+	}
+
+	return 0;
+}
+
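The free-space rule nouveau_dma_wait() implements boils down to two cases: when GET trails or equals our cursor we own the ring's tail, otherwise we own up to one word short of GET, the spare word being reserved for the jump-back command. Just that computation, as a sketch:

	/* get: GPU read pointer, cur: CPU write cursor, max: usable ring
	 * size, all in words, mirroring chan->dma above */
	static int
	ring_free(int get, int cur, int max)
	{
		if (get <= cur)
			return max - cur;	/* tail of the ring is ours */

		return get - cur - 1;		/* stop one word short of GET */
	}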
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
new file mode 100644
index 0000000..04e85d8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_DMA_H__
+#define __NOUVEAU_DMA_H__
+
+#ifndef NOUVEAU_DMA_DEBUG
+#define NOUVEAU_DMA_DEBUG 0
+#endif
+
+/*
+ * There's a hardware race condition where you can't jump to your PUT
+ * offset; to avoid this we jump to offset + SKIPS and fill the
+ * difference with NOPs.
+ *
+ * xf86-video-nv configures the DMA fetch size to 32 bytes and uses a
+ * SKIPS value of 8.  Assuming the race condition has to do with writing
+ * into the fetch area, and since we configure a fetch size of 128
+ * bytes, we need a correspondingly larger SKIPS value.
+ */
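+/* 128-byte fetch size / 4 bytes per dword = 32 dwords of NOP padding */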
+#define NOUVEAU_DMA_SKIPS (128 / 4)
+
+/* Hardcoded object assignments to subchannels (subchannel id). */
+enum {
+	NvSubM2MF	= 0,
+	NvSub2D		= 1,
+	NvSubCtxSurf2D  = 1,
+	NvSubGdiRect    = 2,
+	NvSubImageBlit  = 3
+};
+
+/* Object handles. */
+enum {
+	NvM2MF		= 0x80000001,
+	NvDmaFB		= 0x80000002,
+	NvDmaTT		= 0x80000003,
+	NvDmaVRAM	= 0x80000004,
+	NvDmaGART	= 0x80000005,
+	NvNotify0       = 0x80000006,
+	Nv2D		= 0x80000007,
+	NvCtxSurf2D	= 0x80000008,
+	NvRop		= 0x80000009,
+	NvImagePatt	= 0x8000000a,
+	NvClipRect	= 0x8000000b,
+	NvGdiRect	= 0x8000000c,
+	NvImageBlit	= 0x8000000d,
+
+	/* G80+ display objects */
+	NvEvoVRAM	= 0x01000000,
+	NvEvoFB16	= 0x01000001,
+	NvEvoFB32	= 0x01000002
+};
+
+#define NV_MEMORY_TO_MEMORY_FORMAT                                    0x00000039
+#define NV_MEMORY_TO_MEMORY_FORMAT_NAME                               0x00000000
+#define NV_MEMORY_TO_MEMORY_FORMAT_SET_REF                            0x00000050
+#define NV_MEMORY_TO_MEMORY_FORMAT_NOP                                0x00000100
+#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY                             0x00000104
+#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE                 0x00000000
+#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE_LE_AWAKEN       0x00000001
+#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY                         0x00000180
+#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE                         0x00000184
+#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN                          0x0000030c
+
+#define NV50_MEMORY_TO_MEMORY_FORMAT                                  0x00005039
+#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK200                           0x00000200
+#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK21C                           0x0000021c
+#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH                   0x00000238
+#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_OUT_HIGH                  0x0000023c
+
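+/* reserve 'size' dwords of push buffer space, waiting for the GPU to
+ * free some up if necessary; chan->dma.free is debited on success */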
+static __must_check inline int
+RING_SPACE(struct nouveau_channel *chan, int size)
+{
+	if (chan->dma.free < size) {
+		int ret;
+
+		ret = nouveau_dma_wait(chan, size);
+		if (ret)
+			return ret;
+	}
+
+	chan->dma.free -= size;
+	return 0;
+}
+
+static inline void
+OUT_RING(struct nouveau_channel *chan, int data)
+{
+	if (NOUVEAU_DMA_DEBUG) {
+		NV_INFO(chan->dev, "Ch%d/0x%08x: 0x%08x\n",
+			chan->id, chan->dma.cur << 2, data);
+	}
+
+	nouveau_bo_wr32(chan->pushbuf_bo, chan->dma.cur++, data);
+}
+
+extern void
+OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);
+
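+/* emit a method header: method address in the low bits, subchannel at
+ * bit 13, and the dword count of the data that follows at bit 18 */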
+static inline void
+BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size)
+{
+	OUT_RING(chan, (subc << 13) | (size << 18) | mthd);
+}
+
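+/* the barrier orders the push buffer writes against the PUT update; the
+ * dummy read-back is presumably there to flush them out to memory, so
+ * the GPU never fetches a partially-written command */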
+#define WRITE_PUT(val) do {                                                    \
+	DRM_MEMORYBARRIER();                                                   \
+	nouveau_bo_rd32(chan->pushbuf_bo, 0);                                  \
+	nvchan_wr32(chan, chan->user_put, ((val) << 2) + chan->pushbuf_base);  \
+} while (0)
+
+static inline void
+FIRE_RING(struct nouveau_channel *chan)
+{
+	if (NOUVEAU_DMA_DEBUG) {
+		NV_INFO(chan->dev, "Ch%d/0x%08x: PUSH!\n",
+			chan->id, chan->dma.cur << 2);
+	}
+
+	if (chan->dma.cur == chan->dma.put)
+		return;
+	chan->accel_done = true;
+
+	WRITE_PUT(chan->dma.cur);
+	chan->dma.put = chan->dma.cur;
+}
+
+static inline void
+WIND_RING(struct nouveau_channel *chan)
+{
+	chan->dma.cur = chan->dma.put;
+}
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
new file mode 100644
index 0000000..de61f46
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -0,0 +1,569 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_i2c.h"
+#include "nouveau_encoder.h"
+
+static int
+auxch_rd(struct drm_encoder *encoder, int address, uint8_t *buf, int size)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_i2c_chan *auxch;
+	int ret;
+
+	auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+	if (!auxch)
+		return -ENODEV;
+
+	ret = nouveau_dp_auxch(auxch, 9, address, buf, size);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int
+auxch_wr(struct drm_encoder *encoder, int address, uint8_t *buf, int size)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_i2c_chan *auxch;
+	int ret;
+
+	auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+	if (!auxch)
+		return -ENODEV;
+
+	ret = nouveau_dp_auxch(auxch, 8, address, buf, size);
+	return ret;
+}
+
+static int
+nouveau_dp_lane_count_set(struct drm_encoder *encoder, uint8_t cmd)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	uint32_t tmp;
+	int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
+
+	tmp  = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+	tmp &= ~(NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED |
+		 NV50_SOR_DP_CTRL_LANE_MASK);
+	tmp |= ((1 << (cmd & DP_LANE_COUNT_MASK)) - 1) << 16;
+	if (cmd & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+		tmp |= NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED;
+	nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp);
+
+	return auxch_wr(encoder, DP_LANE_COUNT_SET, &cmd, 1);
+}
+
+static int
+nouveau_dp_link_bw_set(struct drm_encoder *encoder, uint8_t cmd)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	uint32_t tmp;
+	int reg = 0x614300 + (nv_encoder->or * 0x800);
+
+	tmp  = nv_rd32(dev, reg);
+	tmp &= 0xfff3ffff;
+	if (cmd == DP_LINK_BW_2_7)
+		tmp |= 0x00040000;
+	nv_wr32(dev, reg, tmp);
+
+	return auxch_wr(encoder, DP_LINK_BW_SET, &cmd, 1);
+}
+
+static int
+nouveau_dp_link_train_set(struct drm_encoder *encoder, int pattern)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	uint32_t tmp;
+	uint8_t cmd;
+	int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
+	int ret;
+
+	tmp  = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+	tmp &= ~NV50_SOR_DP_CTRL_TRAINING_PATTERN;
+	tmp |= (pattern << 24);
+	nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp);
+
+	ret = auxch_rd(encoder, DP_TRAINING_PATTERN_SET, &cmd, 1);
+	if (ret)
+		return ret;
+	cmd &= ~DP_TRAINING_PATTERN_MASK;
+	cmd |= (pattern & DP_TRAINING_PATTERN_MASK);
+	return auxch_wr(encoder, DP_TRAINING_PATTERN_SET, &cmd, 1);
+}
+
+static int
+nouveau_dp_max_voltage_swing(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct bit_displayport_encoder_table_entry *dpse;
+	struct bit_displayport_encoder_table *dpe;
+	int i, dpe_headerlen, max_vs = 0;
+
+	dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
+	if (!dpe)
+		return false;
+	dpse = (void *)((char *)dpe + dpe_headerlen);
+
+	for (i = 0; i < dpe_headerlen; i++, dpse++) {
+		if (dpse->vs_level > max_vs)
+			max_vs = dpse->vs_level;
+	}
+
+	return max_vs;
+}
+
+static int
+nouveau_dp_max_pre_emphasis(struct drm_encoder *encoder, int vs)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct bit_displayport_encoder_table_entry *dpse;
+	struct bit_displayport_encoder_table *dpe;
+	int i, dpe_headerlen, max_pre = 0;
+
+	dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
+	if (!dpe)
+		return false;
+	dpse = (void *)((char *)dpe + dpe_headerlen);
+
+	for (i = 0; i < dpe_headerlen; i++, dpse++) {
+		if (dpse->vs_level != vs)
+			continue;
+
+		if (dpse->pre_level > max_pre)
+			max_pre = dpse->pre_level;
+	}
+
+	return max_pre;
+}
+
+static bool
+nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct bit_displayport_encoder_table_entry *dpse;
+	struct bit_displayport_encoder_table *dpe;
+	int ret, i, dpe_headerlen, vs = 0, pre = 0;
+	uint8_t request[2];
+
+	dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
+	if (!dpe)
+		return false;
+	dpse = (void *)((char *)dpe + dpe_headerlen);
+
+	ret = auxch_rd(encoder, DP_ADJUST_REQUEST_LANE0_1, request, 2);
+	if (ret)
+		return false;
+
+	NV_DEBUG(dev, "\t\tadjust 0x%02x 0x%02x\n", request[0], request[1]);
+
+	/* Keep all lanes at the same level. */
+	for (i = 0; i < nv_encoder->dp.link_nr; i++) {
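+		/* each adjust_request byte packs two lanes, 4 bits per lane */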
+		int lane_req = (request[i >> 1] >> ((i & 1) << 2)) & 0xf;
+		int lane_vs = lane_req & 3;
+		int lane_pre = (lane_req >> 2) & 3;
+
+		if (lane_vs > vs)
+			vs = lane_vs;
+		if (lane_pre > pre)
+			pre = lane_pre;
+	}
+
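+	/* clamp to the table's limits; bit 2 marks "maximum reached", which
+	 * ends up in the corresponding DPCD TRAINING_LANEx_SET flag bits
+	 * once config[i] is assembled below */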
+	if (vs >= nouveau_dp_max_voltage_swing(encoder)) {
+		vs  = nouveau_dp_max_voltage_swing(encoder);
+		vs |= 4;
+	}
+
+	if (pre >= nouveau_dp_max_pre_emphasis(encoder, vs & 3)) {
+		pre  = nouveau_dp_max_pre_emphasis(encoder, vs & 3);
+		pre |= 4;
+	}
+
+	/* Update the configuration for all lanes. */
+	for (i = 0; i < nv_encoder->dp.link_nr; i++)
+		config[i] = (pre << 3) | vs;
+
+	return true;
+}
+
+static bool
+nouveau_dp_link_train_commit(struct drm_encoder *encoder, uint8_t *config)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct bit_displayport_encoder_table_entry *dpse;
+	struct bit_displayport_encoder_table *dpe;
+	int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
+	int dpe_headerlen, ret, i;
+
+	NV_DEBUG(dev, "\t\tconfig 0x%02x 0x%02x 0x%02x 0x%02x\n",
+		 config[0], config[1], config[2], config[3]);
+
+	dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
+	if (!dpe)
+		return false;
+	dpse = (void *)((char *)dpe + dpe_headerlen);
+
+	for (i = 0; i < dpe->record_nr; i++, dpse++) {
+		if (dpse->vs_level == (config[0] & 3) &&
+		    dpse->pre_level == ((config[0] >> 3) & 3))
+			break;
+	}
+	BUG_ON(i == dpe->record_nr);
+
+	for (i = 0; i < nv_encoder->dp.link_nr; i++) {
+		const int shift[4] = { 16, 8, 0, 24 };
+		uint32_t mask = 0xff << shift[i];
+		uint32_t reg0, reg1, reg2;
+
+		reg0  = nv_rd32(dev, NV50_SOR_DP_UNK118(or, link)) & ~mask;
+		reg0 |= (dpse->reg0 << shift[i]);
+		reg1  = nv_rd32(dev, NV50_SOR_DP_UNK120(or, link)) & ~mask;
+		reg1 |= (dpse->reg1 << shift[i]);
+		reg2  = nv_rd32(dev, NV50_SOR_DP_UNK130(or, link)) & 0xffff00ff;
+		reg2 |= (dpse->reg2 << 8);
+		nv_wr32(dev, NV50_SOR_DP_UNK118(or, link), reg0);
+		nv_wr32(dev, NV50_SOR_DP_UNK120(or, link), reg1);
+		nv_wr32(dev, NV50_SOR_DP_UNK130(or, link), reg2);
+	}
+
+	ret = auxch_wr(encoder, DP_TRAINING_LANE0_SET, config, 4);
+	if (ret)
+		return false;
+
+	return true;
+}
+
+bool
+nouveau_dp_link_train(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	uint8_t config[4];
+	uint8_t status[3];
+	bool cr_done, cr_max_vs, eq_done;
+	int ret = 0, i, tries, voltage;
+
+	NV_DEBUG(dev, "link training!!\n");
+train:
+	cr_done = eq_done = false;
+
+	/* set link configuration */
+	NV_DEBUG(dev, "\tbegin train: bw %d, lanes %d\n",
+		 nv_encoder->dp.link_bw, nv_encoder->dp.link_nr);
+
+	ret = nouveau_dp_link_bw_set(encoder, nv_encoder->dp.link_bw);
+	if (ret)
+		return false;
+
+	config[0] = nv_encoder->dp.link_nr;
+	if (nv_encoder->dp.dpcd_version >= 0x11)
+		config[0] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+	ret = nouveau_dp_lane_count_set(encoder, config[0]);
+	if (ret)
+		return false;
+
+	/* clock recovery */
+	NV_DEBUG(dev, "\tbegin cr\n");
+	ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_1);
+	if (ret)
+		goto stop;
+
+	tries = 0;
+	voltage = -1;
+	memset(config, 0x00, sizeof(config));
+	for (;;) {
+		if (!nouveau_dp_link_train_commit(encoder, config))
+			break;
+
+		udelay(100);
+
+		ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 2);
+		if (ret)
+			break;
+		NV_DEBUG(dev, "\t\tstatus: 0x%02x 0x%02x\n",
+			 status[0], status[1]);
+
+		cr_done = true;
+		cr_max_vs = false;
+		for (i = 0; i < nv_encoder->dp.link_nr; i++) {
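+			/* two lanes per DPCD status byte, 4 bits each */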
+			int lane = (status[i >> 1] >> ((i & 1) * 4)) & 0xf;
+
+			if (!(lane & DP_LANE_CR_DONE)) {
+				cr_done = false;
+				if (config[i] & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED)
+					cr_max_vs = true;
+				break;
+			}
+		}
+
+		if ((config[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) {
+			voltage = config[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+			tries = 0;
+		}
+
+		if (cr_done || cr_max_vs || (++tries == 5))
+			break;
+
+		if (!nouveau_dp_link_train_adjust(encoder, config))
+			break;
+	}
+
+	if (!cr_done)
+		goto stop;
+
+	/* channel equalisation */
+	NV_DEBUG(dev, "\tbegin eq\n");
+	ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_2);
+	if (ret)
+		goto stop;
+
+	for (tries = 0; tries <= 5; tries++) {
+		udelay(400);
+
+		ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 3);
+		if (ret)
+			break;
+		NV_DEBUG(dev, "\t\tstatus: 0x%02x 0x%02x\n",
+			 status[0], status[1]);
+
+		eq_done = true;
+		if (!(status[2] & DP_INTERLANE_ALIGN_DONE))
+			eq_done = false;
+
+		for (i = 0; eq_done && i < nv_encoder->dp.link_nr; i++) {
+			int lane = (status[i >> 1] >> ((i & 1) * 4)) & 0xf;
+
+			if (!(lane & DP_LANE_CR_DONE)) {
+				cr_done = false;
+				break;
+			}
+
+			if (!(lane & DP_LANE_CHANNEL_EQ_DONE) ||
+			    !(lane & DP_LANE_SYMBOL_LOCKED)) {
+				eq_done = false;
+				break;
+			}
+		}
+
+		if (eq_done || !cr_done)
+			break;
+
+		if (!nouveau_dp_link_train_adjust(encoder, config) ||
+		    !nouveau_dp_link_train_commit(encoder, config))
+			break;
+	}
+
+stop:
+	/* end link training */
+	ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_DISABLE);
+	if (ret)
+		return false;
+
+	/* retry at a lower setting, if possible */
+	if (!ret && !(eq_done && cr_done)) {
+		NV_DEBUG(dev, "\twe failed\n");
+		if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62) {
+			NV_DEBUG(dev, "retry link training at low rate\n");
+			nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
+			goto train;
+		}
+	}
+
+	return eq_done;
+}
+
+bool
+nouveau_dp_detect(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+	uint8_t dpcd[4];
+	int ret;
+
+	ret = auxch_rd(encoder, 0x0000, dpcd, 4);
+	if (ret)
+		return false;
+
+	NV_DEBUG(dev, "encoder: link_bw %d, link_nr %d\n"
+		      "display: link_bw %d, link_nr %d version 0x%02x\n",
+		 nv_encoder->dcb->dpconf.link_bw,
+		 nv_encoder->dcb->dpconf.link_nr,
+		 dpcd[1], dpcd[2] & 0x0f, dpcd[0]);
+
+	nv_encoder->dp.dpcd_version = dpcd[0];
+
+	nv_encoder->dp.link_bw = dpcd[1];
+	if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62 &&
+	    !nv_encoder->dcb->dpconf.link_bw)
+		nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
+
+	nv_encoder->dp.link_nr = dpcd[2] & 0xf;
+	if (nv_encoder->dp.link_nr > nv_encoder->dcb->dpconf.link_nr)
+		nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr;
+
+	return true;
+}
+
+int
+nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
+		 uint8_t *data, int data_nr)
+{
+	struct drm_device *dev = auxch->dev;
+	uint32_t tmp, ctrl, stat = 0, data32[4] = {};
+	int ret = 0, i, index = auxch->rd;
+
+	NV_DEBUG(dev, "ch %d cmd %d addr 0x%x len %d\n", index, cmd, addr, data_nr);
+
+	tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
+	nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp | 0x00100000);
+	tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
+	if (!(tmp & 0x01000000)) {
+		NV_ERROR(dev, "expected bit 24 == 1, got 0x%08x\n", tmp);
+		ret = -EIO;
+		goto out;
+	}
+
+	for (i = 0; i < 3; i++) {
+		tmp = nv_rd32(dev, NV50_AUXCH_STAT(auxch->rd));
+		if (tmp & NV50_AUXCH_STAT_STATE_READY)
+			break;
+		udelay(100);
+	}
+
+	if (i == 3) {
+		ret = -EBUSY;
+		goto out;
+	}
+
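+	/* even AUX commands (e.g. the 8 passed by auxch_wr) are writes:
+	 * preload the outgoing data words */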
+	if (!(cmd & 1)) {
+		memcpy(data32, data, data_nr);
+		for (i = 0; i < 4; i++) {
+			NV_DEBUG(dev, "wr %d: 0x%08x\n", i, data32[i]);
+			nv_wr32(dev, NV50_AUXCH_DATA_OUT(index, i), data32[i]);
+		}
+	}
+
+	nv_wr32(dev, NV50_AUXCH_ADDR(index), addr);
+	ctrl  = nv_rd32(dev, NV50_AUXCH_CTRL(index));
+	ctrl &= ~(NV50_AUXCH_CTRL_CMD | NV50_AUXCH_CTRL_LEN);
+	ctrl |= (cmd << NV50_AUXCH_CTRL_CMD_SHIFT);
+	ctrl |= ((data_nr - 1) << NV50_AUXCH_CTRL_LEN_SHIFT);
+
+	for (;;) {
+		nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000);
+		nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl);
+		nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000);
+		if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) {
+			NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n",
+				 nv_rd32(dev, NV50_AUXCH_CTRL(index)));
+			return -EBUSY;
+		}
+
+		udelay(400);
+
+		stat = nv_rd32(dev, NV50_AUXCH_STAT(index));
+		if ((stat & NV50_AUXCH_STAT_REPLY_AUX) !=
+			    NV50_AUXCH_STAT_REPLY_AUX_DEFER)
+			break;
+	}
+
+	if (cmd & 1) {
+		for (i = 0; i < 4; i++) {
+			data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
+			NV_DEBUG(dev, "rd %d: 0x%08x\n", i, data32[i]);
+		}
+		memcpy(data, data32, data_nr);
+	}
+
+out:
+	tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
+	nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp & ~0x00100000);
+	tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
+	if (tmp & 0x01000000) {
+		NV_ERROR(dev, "expected bit 24 == 0, got 0x%08x\n", tmp);
+		ret = -EIO;
+	}
+
+	udelay(400);
+
+	return ret ? ret : (stat & NV50_AUXCH_STAT_REPLY);
+}
+
+int
+nouveau_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+		      uint8_t write_byte, uint8_t *read_byte)
+{
+	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+	struct nouveau_i2c_chan *auxch = (struct nouveau_i2c_chan *)adapter;
+	struct drm_device *dev = auxch->dev;
+	int ret = 0, cmd, addr = algo_data->address;
+	uint8_t *buf;
+
+	if (mode == MODE_I2C_READ) {
+		cmd = AUX_I2C_READ;
+		buf = read_byte;
+	} else {
+		cmd = (mode & MODE_I2C_READ) ? AUX_I2C_READ : AUX_I2C_WRITE;
+		buf = &write_byte;
+	}
+
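+	/* MOT (middle-of-transaction) keeps the i2c-over-AUX transaction
+	 * open between transfers until the STOP phase */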
+	if (!(mode & MODE_I2C_STOP))
+		cmd |= AUX_I2C_MOT;
+
+	if (mode & MODE_I2C_START)
+		return 1;
+
+	for (;;) {
+		ret = nouveau_dp_auxch(auxch, cmd, addr, buf, 1);
+		if (ret < 0)
+			return ret;
+
+		switch (ret & NV50_AUXCH_STAT_REPLY_I2C) {
+		case NV50_AUXCH_STAT_REPLY_I2C_ACK:
+			return 1;
+		case NV50_AUXCH_STAT_REPLY_I2C_NACK:
+			return -EREMOTEIO;
+		case NV50_AUXCH_STAT_REPLY_I2C_DEFER:
+			udelay(100);
+			break;
+		default:
+			NV_ERROR(dev, "invalid auxch status: 0x%08x\n", ret);
+			return -EREMOTEIO;
+		}
+	}
+}
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
new file mode 100644
index 0000000..35249c3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -0,0 +1,405 @@
+/*
+ * Copyright 2005 Stephane Marchesin.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/console.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+#include "nouveau_fb.h"
+#include "nouveau_fbcon.h"
+#include "nv50_display.h"
+
+#include "drm_pciids.h"
+
+MODULE_PARM_DESC(noagp, "Disable AGP");
+int nouveau_noagp;
+module_param_named(noagp, nouveau_noagp, int, 0400);
+
+MODULE_PARM_DESC(modeset, "Enable kernel modesetting");
+static int nouveau_modeset = -1; /* kms */
+module_param_named(modeset, nouveau_modeset, int, 0400);
+
+MODULE_PARM_DESC(vbios, "Override default VBIOS location");
+char *nouveau_vbios;
+module_param_named(vbios, nouveau_vbios, charp, 0400);
+
+MODULE_PARM_DESC(vram_pushbuf, "Force DMA push buffers to be in VRAM");
+int nouveau_vram_pushbuf;
+module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
+
+MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
+int nouveau_vram_notify;
+module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
+
+MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
+int nouveau_duallink = 1;
+module_param_named(duallink, nouveau_duallink, int, 0400);
+
+MODULE_PARM_DESC(uscript_lvds, "LVDS output script table ID (>=GeForce 8)");
+int nouveau_uscript_lvds = -1;
+module_param_named(uscript_lvds, nouveau_uscript_lvds, int, 0400);
+
+MODULE_PARM_DESC(uscript_tmds, "TMDS output script table ID (>=GeForce 8)");
+int nouveau_uscript_tmds = -1;
+module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400);
+
+MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
+		 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
+		 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
+		 "\t\tDefault: PAL\n"
+		 "\t\t*NOTE* Ignored for cards with external TV encoders.");
+char *nouveau_tv_norm;
+module_param_named(tv_norm, nouveau_tv_norm, charp, 0400);
+
+MODULE_PARM_DESC(reg_debug, "Register access debug bitmask:\n"
+		"\t\t0x1 mc, 0x2 video, 0x4 fb, 0x8 extdev,\n"
+		"\t\t0x10 crtc, 0x20 ramdac, 0x40 vgacrtc, 0x80 rmvio,\n"
+		"\t\t0x100 vgaattr, 0x200 EVO (G80+). ");
+int nouveau_reg_debug;
+module_param_named(reg_debug, nouveau_reg_debug, int, 0600);
+
+int nouveau_fbpercrtc;
+#if 0
+module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
+#endif
+
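+/* claim every NVIDIA (and NVIDIA/SGS) PCI device in the display class */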
+static struct pci_device_id pciidlist[] = {
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
+		.class = PCI_BASE_CLASS_DISPLAY << 16,
+		.class_mask  = 0xff << 16,
+	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID),
+		.class = PCI_BASE_CLASS_DISPLAY << 16,
+		.class_mask  = 0xff << 16,
+	},
+	{}
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static struct drm_driver driver;
+
+static int __devinit
+nouveau_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	return drm_get_dev(pdev, ent, &driver);
+}
+
+static void
+nouveau_pci_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	drm_put_dev(dev);
+}
+
+static int
+nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nouveau_channel *chan;
+	struct drm_crtc *crtc;
+	uint32_t fbdev_flags;
+	int ret, i;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
+	if (pm_state.event == PM_EVENT_PRETHAW)
+		return 0;
+
+	fbdev_flags = dev_priv->fbdev_info->flags;
+	dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct nouveau_framebuffer *nouveau_fb;
+
+		nouveau_fb = nouveau_framebuffer(crtc->fb);
+		if (!nouveau_fb || !nouveau_fb->nvbo)
+			continue;
+
+		nouveau_bo_unpin(nouveau_fb->nvbo);
+	}
+
+	NV_INFO(dev, "Evicting buffers...\n");
+	ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
+
+	NV_INFO(dev, "Idling channels...\n");
+	for (i = 0; i < pfifo->channels; i++) {
+		struct nouveau_fence *fence = NULL;
+
+		chan = dev_priv->fifos[i];
+		if (!chan || (dev_priv->card_type >= NV_50 &&
+			      chan == dev_priv->fifos[0]))
+			continue;
+
+		ret = nouveau_fence_new(chan, &fence, true);
+		if (ret == 0) {
+			ret = nouveau_fence_wait(fence, NULL, false, false);
+			nouveau_fence_unref((void *)&fence);
+		}
+
+		if (ret) {
+			NV_ERROR(dev, "Failed to idle channel %d for suspend\n",
+				 chan->id);
+		}
+	}
+
+	pgraph->fifo_access(dev, false);
+	nouveau_wait_for_idle(dev);
+	pfifo->reassign(dev, false);
+	pfifo->disable(dev);
+	pfifo->unload_context(dev);
+	pgraph->unload_context(dev);
+
+	NV_INFO(dev, "Suspending GPU objects...\n");
+	ret = nouveau_gpuobj_suspend(dev);
+	if (ret) {
+		NV_ERROR(dev, "... failed: %d\n", ret);
+		goto out_abort;
+	}
+
+	ret = pinstmem->suspend(dev);
+	if (ret) {
+		NV_ERROR(dev, "... failed: %d\n", ret);
+		nouveau_gpuobj_suspend_cleanup(dev);
+		goto out_abort;
+	}
+
+	NV_INFO(dev, "And we're gone!\n");
+	pci_save_state(pdev);
+	if (pm_state.event == PM_EVENT_SUSPEND) {
+		pci_disable_device(pdev);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+
+	acquire_console_sem();
+	fb_set_suspend(dev_priv->fbdev_info, 1);
+	release_console_sem();
+	dev_priv->fbdev_info->flags = fbdev_flags;
+	return 0;
+
+out_abort:
+	NV_INFO(dev, "Re-enabling acceleration..\n");
+	pfifo->enable(dev);
+	pfifo->reassign(dev, true);
+	pgraph->fifo_access(dev, true);
+	return ret;
+}
+
+static int
+nouveau_pci_resume(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_engine *engine = &dev_priv->engine;
+	struct drm_crtc *crtc;
+	uint32_t fbdev_flags;
+	int ret, i;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
+	fbdev_flags = dev_priv->fbdev_info->flags;
+	dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
+
+	NV_INFO(dev, "We're back, enabling device...\n");
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	if (pci_enable_device(pdev))
+		return -1;
+	pci_set_master(dev->pdev);
+
+	NV_INFO(dev, "POSTing device...\n");
+	ret = nouveau_run_vbios_init(dev);
+	if (ret)
+		return ret;
+
+	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+		ret = nouveau_mem_init_agp(dev);
+		if (ret) {
+			NV_ERROR(dev, "error reinitialising AGP: %d\n", ret);
+			return ret;
+		}
+	}
+
+	NV_INFO(dev, "Reinitialising engines...\n");
+	engine->instmem.resume(dev);
+	engine->mc.init(dev);
+	engine->timer.init(dev);
+	engine->fb.init(dev);
+	engine->graph.init(dev);
+	engine->fifo.init(dev);
+
+	NV_INFO(dev, "Restoring GPU objects...\n");
+	nouveau_gpuobj_resume(dev);
+
+	nouveau_irq_postinstall(dev);
+
+	/* Re-write SKIPS, they'll have been lost over the suspend */
+	if (nouveau_vram_pushbuf) {
+		struct nouveau_channel *chan;
+		int j;
+
+		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+			chan = dev_priv->fifos[i];
+			if (!chan)
+				continue;
+
+			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
+				nouveau_bo_wr32(chan->pushbuf_bo, j, 0);
+		}
+	}
+
+	NV_INFO(dev, "Restoring mode...\n");
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct nouveau_framebuffer *nouveau_fb;
+
+		nouveau_fb = nouveau_framebuffer(crtc->fb);
+		if (!nouveau_fb || !nouveau_fb->nvbo)
+			continue;
+
+		nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
+	}
+
+	if (dev_priv->card_type < NV_50) {
+		nv04_display_restore(dev);
+		NVLockVgaCrtcs(dev, false);
+	} else {
+		nv50_display_init(dev);
+	}
+
+	/* Force CLUT to get re-loaded during modeset */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+		nv_crtc->lut.depth = 0;
+	}
+
+	acquire_console_sem();
+	fb_set_suspend(dev_priv->fbdev_info, 0);
+	release_console_sem();
+
+	nouveau_fbcon_zfill(dev);
+
+	drm_helper_resume_force_mode(dev);
+	dev_priv->fbdev_info->flags = fbdev_flags;
+	return 0;
+}
+
+static struct drm_driver driver = {
+	.driver_features =
+		DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
+		DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
+	.load = nouveau_load,
+	.firstopen = nouveau_firstopen,
+	.lastclose = nouveau_lastclose,
+	.unload = nouveau_unload,
+	.preclose = nouveau_preclose,
+#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
+	.debugfs_init = nouveau_debugfs_init,
+	.debugfs_cleanup = nouveau_debugfs_takedown,
+#endif
+	.irq_preinstall = nouveau_irq_preinstall,
+	.irq_postinstall = nouveau_irq_postinstall,
+	.irq_uninstall = nouveau_irq_uninstall,
+	.irq_handler = nouveau_irq_handler,
+	.reclaim_buffers = drm_core_reclaim_buffers,
+	.get_map_ofs = drm_core_get_map_ofs,
+	.get_reg_ofs = drm_core_get_reg_ofs,
+	.ioctls = nouveau_ioctls,
+	.fops = {
+		.owner = THIS_MODULE,
+		.open = drm_open,
+		.release = drm_release,
+		.ioctl = drm_ioctl,
+		.mmap = nouveau_ttm_mmap,
+		.poll = drm_poll,
+		.fasync = drm_fasync,
+#if defined(CONFIG_COMPAT)
+		.compat_ioctl = nouveau_compat_ioctl,
+#endif
+	},
+	.pci_driver = {
+		.name = DRIVER_NAME,
+		.id_table = pciidlist,
+		.probe = nouveau_pci_probe,
+		.remove = nouveau_pci_remove,
+		.suspend = nouveau_pci_suspend,
+		.resume = nouveau_pci_resume
+	},
+
+	.gem_init_object = nouveau_gem_object_new,
+	.gem_free_object = nouveau_gem_object_del,
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+#ifdef GIT_REVISION
+	.date = GIT_REVISION,
+#else
+	.date = DRIVER_DATE,
+#endif
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static int __init nouveau_init(void)
+{
+	driver.num_ioctls = nouveau_max_ioctl;
+
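+	/* default to KMS unless the user forced a VGA text console
+	 * (e.g. by booting with "nomodeset") */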
+	if (nouveau_modeset == -1) {
+#ifdef CONFIG_VGA_CONSOLE
+		if (vgacon_text_force())
+			nouveau_modeset = 0;
+		else
+#endif
+			nouveau_modeset = 1;
+	}
+
+	if (nouveau_modeset == 1)
+		driver.driver_features |= DRIVER_MODESET;
+
+	return drm_init(&driver);
+}
+
+static void __exit nouveau_exit(void)
+{
+	drm_exit(&driver);
+}
+
+module_init(nouveau_init);
+module_exit(nouveau_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
new file mode 100644
index 0000000..88b4c7b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -0,0 +1,1286 @@
+/*
+ * Copyright 2005 Stephane Marchesin.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_DRV_H__
+#define __NOUVEAU_DRV_H__
+
+#define DRIVER_AUTHOR		"Stephane Marchesin"
+#define DRIVER_EMAIL		"dri-devel@lists.sourceforge.net"
+
+#define DRIVER_NAME		"nouveau"
+#define DRIVER_DESC		"nVidia Riva/TNT/GeForce"
+#define DRIVER_DATE		"20090420"
+
+#define DRIVER_MAJOR		0
+#define DRIVER_MINOR		0
+#define DRIVER_PATCHLEVEL	15
+
+#define NOUVEAU_FAMILY   0x0000FFFF
+#define NOUVEAU_FLAGS    0xFFFF0000
+
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include "ttm/ttm_memory.h"
+#include "ttm/ttm_module.h"
+
+struct nouveau_fpriv {
+	struct ttm_object_file *tfile;
+};
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
+#include "nouveau_bios.h"
+
+#define MAX_NUM_DCB_ENTRIES 16
+
+#define NOUVEAU_MAX_CHANNEL_NR 128
+
+#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
+#define NV50_VM_BLOCK    (512*1024*1024ULL)
+#define NV50_VM_VRAM_NR  (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
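+/* i.e. up to 2GiB of VM space for VRAM, split into four 512MiB blocks,
+ * each covered by its own page table */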
+
+struct nouveau_bo {
+	struct ttm_buffer_object bo;
+	struct ttm_placement placement;
+	u32 placements[3];
+	struct ttm_bo_kmap_obj kmap;
+	struct list_head head;
+
+	/* protected by ttm_bo_reserve() */
+	struct drm_file *reserved_by;
+	struct list_head entry;
+	int pbbo_index;
+
+	struct nouveau_channel *channel;
+
+	bool mappable;
+	bool no_vm;
+
+	uint32_t tile_mode;
+	uint32_t tile_flags;
+
+	struct drm_gem_object *gem;
+	struct drm_file *cpu_filp;
+	int pin_refcnt;
+};
+
+static inline struct nouveau_bo *
+nouveau_bo(struct ttm_buffer_object *bo)
+{
+	return container_of(bo, struct nouveau_bo, bo);
+}
+
+static inline struct nouveau_bo *
+nouveau_gem_object(struct drm_gem_object *gem)
+{
+	return gem ? gem->driver_private : NULL;
+}
+
+/* TODO: submit equivalent to TTM generic API upstream? */
+static inline void __iomem *
+nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
+{
+	bool is_iomem;
+	void __iomem *ioptr = (void __force __iomem *)ttm_kmap_obj_virtual(
+						&nvbo->kmap, &is_iomem);
+	WARN_ON_ONCE(ioptr && !is_iomem);
+	return ioptr;
+}
+
+struct mem_block {
+	struct mem_block *next;
+	struct mem_block *prev;
+	uint64_t start;
+	uint64_t size;
+	struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
+};
+
+enum nouveau_flags {
+	NV_NFORCE   = 0x10000000,
+	NV_NFORCE2  = 0x20000000
+};
+
+#define NVOBJ_ENGINE_SW		0
+#define NVOBJ_ENGINE_GR		1
+#define NVOBJ_ENGINE_DISPLAY	2
+#define NVOBJ_ENGINE_INT	0xdeadbeef
+
+#define NVOBJ_FLAG_ALLOW_NO_REFS	(1 << 0)
+#define NVOBJ_FLAG_ZERO_ALLOC		(1 << 1)
+#define NVOBJ_FLAG_ZERO_FREE		(1 << 2)
+#define NVOBJ_FLAG_FAKE			(1 << 3)
+struct nouveau_gpuobj {
+	struct list_head list;
+
+	struct nouveau_channel *im_channel;
+	struct mem_block *im_pramin;
+	struct nouveau_bo *im_backing;
+	uint32_t im_backing_start;
+	uint32_t *im_backing_suspend;
+	int im_bound;
+
+	uint32_t flags;
+	int refcount;
+
+	uint32_t engine;
+	uint32_t class;
+
+	void (*dtor)(struct drm_device *, struct nouveau_gpuobj *);
+	void *priv;
+};
+
+struct nouveau_gpuobj_ref {
+	struct list_head list;
+
+	struct nouveau_gpuobj *gpuobj;
+	uint32_t instance;
+
+	struct nouveau_channel *channel;
+	int handle;
+};
+
+struct nouveau_channel {
+	struct drm_device *dev;
+	int id;
+
+	/* owner of this fifo */
+	struct drm_file *file_priv;
+	/* mapping of the fifo itself */
+	struct drm_local_map *map;
+
+	/* mapping of the regs controlling the fifo */
+	void __iomem *user;
+	uint32_t user_get;
+	uint32_t user_put;
+
+	/* Fencing */
+	struct {
+		/* lock protects the pending list only */
+		spinlock_t lock;
+		struct list_head pending;
+		uint32_t sequence;
+		uint32_t sequence_ack;
+		uint32_t last_sequence_irq;
+	} fence;
+
+	/* DMA push buffer */
+	struct nouveau_gpuobj_ref *pushbuf;
+	struct nouveau_bo         *pushbuf_bo;
+	uint32_t                   pushbuf_base;
+
+	/* Notifier memory */
+	struct nouveau_bo *notifier_bo;
+	struct mem_block *notifier_heap;
+
+	/* PFIFO context */
+	struct nouveau_gpuobj_ref *ramfc;
+	struct nouveau_gpuobj_ref *cache;
+
+	/* PGRAPH context */
+	/* XXX maybe merge the two pointers as private data ??? */
+	struct nouveau_gpuobj_ref *ramin_grctx;
+	void *pgraph_ctx;
+
+	/* NV50 VM */
+	struct nouveau_gpuobj     *vm_pd;
+	struct nouveau_gpuobj_ref *vm_gart_pt;
+	struct nouveau_gpuobj_ref *vm_vram_pt[NV50_VM_VRAM_NR];
+
+	/* Objects */
+	struct nouveau_gpuobj_ref *ramin; /* Private instmem */
+	struct mem_block          *ramin_heap; /* Private PRAMIN heap */
+	struct nouveau_gpuobj_ref *ramht; /* Hash table */
+	struct list_head           ramht_refs; /* Objects referenced by RAMHT */
+
+	/* GPU object info for stuff used in-kernel (mm_enabled) */
+	uint32_t m2mf_ntfy;
+	uint32_t vram_handle;
+	uint32_t gart_handle;
+	bool accel_done;
+
+	/* Push buffer state (only for drm's channel on !mm_enabled) */
+	struct {
+		int max;
+		int free;
+		int cur;
+		int put;
+		/* access via pushbuf_bo */
+	} dma;
+
+	uint32_t sw_subchannel[8];
+
+	struct {
+		struct nouveau_gpuobj *vblsem;
+		uint32_t vblsem_offset;
+		uint32_t vblsem_rval;
+		struct list_head vbl_wait;
+	} nvsw;
+
+	struct {
+		bool active;
+		char name[32];
+		struct drm_info_list info;
+	} debugfs;
+};
+
+struct nouveau_instmem_engine {
+	void	*priv;
+
+	int	(*init)(struct drm_device *dev);
+	void	(*takedown)(struct drm_device *dev);
+	int	(*suspend)(struct drm_device *dev);
+	void	(*resume)(struct drm_device *dev);
+
+	int	(*populate)(struct drm_device *, struct nouveau_gpuobj *,
+			    uint32_t *size);
+	void	(*clear)(struct drm_device *, struct nouveau_gpuobj *);
+	int	(*bind)(struct drm_device *, struct nouveau_gpuobj *);
+	int	(*unbind)(struct drm_device *, struct nouveau_gpuobj *);
+	void	(*prepare_access)(struct drm_device *, bool write);
+	void	(*finish_access)(struct drm_device *);
+};
+
+struct nouveau_mc_engine {
+	int  (*init)(struct drm_device *dev);
+	void (*takedown)(struct drm_device *dev);
+};
+
+struct nouveau_timer_engine {
+	int      (*init)(struct drm_device *dev);
+	void     (*takedown)(struct drm_device *dev);
+	uint64_t (*read)(struct drm_device *dev);
+};
+
+struct nouveau_fb_engine {
+	int  (*init)(struct drm_device *dev);
+	void (*takedown)(struct drm_device *dev);
+};
+
+struct nouveau_fifo_engine {
+	void *priv;
+
+	int  channels;
+
+	int  (*init)(struct drm_device *);
+	void (*takedown)(struct drm_device *);
+
+	void (*disable)(struct drm_device *);
+	void (*enable)(struct drm_device *);
+	bool (*reassign)(struct drm_device *, bool enable);
+
+	int  (*channel_id)(struct drm_device *);
+
+	int  (*create_context)(struct nouveau_channel *);
+	void (*destroy_context)(struct nouveau_channel *);
+	int  (*load_context)(struct nouveau_channel *);
+	int  (*unload_context)(struct drm_device *);
+};
+
+struct nouveau_pgraph_object_method {
+	int id;
+	int (*exec)(struct nouveau_channel *chan, int grclass, int mthd,
+		      uint32_t data);
+};
+
+struct nouveau_pgraph_object_class {
+	int id;
+	bool software;
+	struct nouveau_pgraph_object_method *methods;
+};
+
+struct nouveau_pgraph_engine {
+	struct nouveau_pgraph_object_class *grclass;
+	bool accel_blocked;
+	void *ctxprog;
+	void *ctxvals;
+
+	int  (*init)(struct drm_device *);
+	void (*takedown)(struct drm_device *);
+
+	void (*fifo_access)(struct drm_device *, bool);
+
+	struct nouveau_channel *(*channel)(struct drm_device *);
+	int  (*create_context)(struct nouveau_channel *);
+	void (*destroy_context)(struct nouveau_channel *);
+	int  (*load_context)(struct nouveau_channel *);
+	int  (*unload_context)(struct drm_device *);
+};
+
+struct nouveau_engine {
+	struct nouveau_instmem_engine instmem;
+	struct nouveau_mc_engine      mc;
+	struct nouveau_timer_engine   timer;
+	struct nouveau_fb_engine      fb;
+	struct nouveau_pgraph_engine  graph;
+	struct nouveau_fifo_engine    fifo;
+};
+
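+/* the endian-dependent byte order keeps the packed 16-bit NM1/NM2 view
+ * consistent with the individual N/M byte fields */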
+struct nouveau_pll_vals {
+	union {
+		struct {
+#ifdef __BIG_ENDIAN
+			uint8_t N1, M1, N2, M2;
+#else
+			uint8_t M1, N1, M2, N2;
+#endif
+		};
+		struct {
+			uint16_t NM1, NM2;
+		} __attribute__((packed));
+	};
+	int log2P;
+
+	int refclk;
+};
+
+enum nv04_fp_display_regs {
+	FP_DISPLAY_END,
+	FP_TOTAL,
+	FP_CRTC,
+	FP_SYNC_START,
+	FP_SYNC_END,
+	FP_VALID_START,
+	FP_VALID_END
+};
+
+struct nv04_crtc_reg {
+	unsigned char MiscOutReg;
+	uint8_t CRTC[0x9f];
+	uint8_t CR58[0x10];
+	uint8_t Sequencer[5];
+	uint8_t Graphics[9];
+	uint8_t Attribute[21];
+	unsigned char DAC[768];       /* Internal color lookup table */
+
+	/* PCRTC regs */
+	uint32_t fb_start;
+	uint32_t crtc_cfg;
+	uint32_t cursor_cfg;
+	uint32_t gpio_ext;
+	uint32_t crtc_830;
+	uint32_t crtc_834;
+	uint32_t crtc_850;
+	uint32_t crtc_eng_ctrl;
+
+	/* PRAMDAC regs */
+	uint32_t nv10_cursync;
+	struct nouveau_pll_vals pllvals;
+	uint32_t ramdac_gen_ctrl;
+	uint32_t ramdac_630;
+	uint32_t ramdac_634;
+	uint32_t tv_setup;
+	uint32_t tv_vtotal;
+	uint32_t tv_vskew;
+	uint32_t tv_vsync_delay;
+	uint32_t tv_htotal;
+	uint32_t tv_hskew;
+	uint32_t tv_hsync_delay;
+	uint32_t tv_hsync_delay2;
+	uint32_t fp_horiz_regs[7];
+	uint32_t fp_vert_regs[7];
+	uint32_t dither;
+	uint32_t fp_control;
+	uint32_t dither_regs[6];
+	uint32_t fp_debug_0;
+	uint32_t fp_debug_1;
+	uint32_t fp_debug_2;
+	uint32_t fp_margin_color;
+	uint32_t ramdac_8c0;
+	uint32_t ramdac_a20;
+	uint32_t ramdac_a24;
+	uint32_t ramdac_a34;
+	uint32_t ctv_regs[38];
+};
+
+struct nv04_output_reg {
+	uint32_t output;
+	int head;
+};
+
+struct nv04_mode_state {
+	uint32_t bpp;
+	uint32_t width;
+	uint32_t height;
+	uint32_t interlace;
+	uint32_t repaint0;
+	uint32_t repaint1;
+	uint32_t screen;
+	uint32_t scale;
+	uint32_t dither;
+	uint32_t extra;
+	uint32_t fifo;
+	uint32_t pixel;
+	uint32_t horiz;
+	int arbitration0;
+	int arbitration1;
+	uint32_t pll;
+	uint32_t pllB;
+	uint32_t vpll;
+	uint32_t vpll2;
+	uint32_t vpllB;
+	uint32_t vpll2B;
+	uint32_t pllsel;
+	uint32_t sel_clk;
+	uint32_t general;
+	uint32_t crtcOwner;
+	uint32_t head;
+	uint32_t head2;
+	uint32_t cursorConfig;
+	uint32_t cursor0;
+	uint32_t cursor1;
+	uint32_t cursor2;
+	uint32_t timingH;
+	uint32_t timingV;
+	uint32_t displayV;
+	uint32_t crtcSync;
+
+	struct nv04_crtc_reg crtc_reg[2];
+};
+
+enum nouveau_card_type {
+	NV_04      = 0x00,
+	NV_10      = 0x10,
+	NV_20      = 0x20,
+	NV_30      = 0x30,
+	NV_40      = 0x40,
+	NV_50      = 0x50,
+};
+
+struct drm_nouveau_private {
+	struct drm_device *dev;
+	enum {
+		NOUVEAU_CARD_INIT_DOWN,
+		NOUVEAU_CARD_INIT_DONE,
+		NOUVEAU_CARD_INIT_FAILED
+	} init_state;
+
+	/* the card type, takes NV_* as values */
+	enum nouveau_card_type card_type;
+	/* exact chipset, derived from NV_PMC_BOOT_0 */
+	int chipset;
+	int flags;
+
+	void __iomem *mmio;
+	void __iomem *ramin;
+	uint32_t ramin_size;
+
+	struct workqueue_struct *wq;
+	struct work_struct irq_work;
+
+	struct list_head vbl_waiting;
+
+	struct {
+		struct ttm_global_reference mem_global_ref;
+		struct ttm_bo_global_ref bo_global_ref;
+		struct ttm_bo_device bdev;
+		spinlock_t bo_list_lock;
+		struct list_head bo_list;
+		atomic_t validate_sequence;
+	} ttm;
+
+	struct fb_info *fbdev_info;
+
+	int fifo_alloc_count;
+	struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
+
+	struct nouveau_engine engine;
+	struct nouveau_channel *channel;
+
+	/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
+	struct nouveau_gpuobj *ramht;
+	uint32_t ramin_rsvd_vram;
+	uint32_t ramht_offset;
+	uint32_t ramht_size;
+	uint32_t ramht_bits;
+	uint32_t ramfc_offset;
+	uint32_t ramfc_size;
+	uint32_t ramro_offset;
+	uint32_t ramro_size;
+
+	/* base physical addresses */
+	uint64_t fb_phys;
+	uint64_t fb_available_size;
+	uint64_t fb_mappable_pages;
+	uint64_t fb_aper_free;
+
+	struct {
+		enum {
+			NOUVEAU_GART_NONE = 0,
+			NOUVEAU_GART_AGP,
+			NOUVEAU_GART_SGDMA
+		} type;
+		uint64_t aper_base;
+		uint64_t aper_size;
+		uint64_t aper_free;
+
+		struct nouveau_gpuobj *sg_ctxdma;
+		struct page *sg_dummy_page;
+		dma_addr_t sg_dummy_bus;
+
+		/* nottm hack */
+		struct drm_ttm_backend *sg_be;
+		unsigned long sg_handle;
+	} gart_info;
+
+	/* G8x/G9x virtual address space */
+	uint64_t vm_gart_base;
+	uint64_t vm_gart_size;
+	uint64_t vm_vram_base;
+	uint64_t vm_vram_size;
+	uint64_t vm_end;
+	struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
+	int vm_vram_pt_nr;
+
+	/* the mtrr covering the FB */
+	int fb_mtrr;
+
+	struct mem_block *ramin_heap;
+
+	/* context table pointed to by NV_PGRAPH_CHANNEL_CTX_TABLE (0x400780) */
+	uint32_t ctx_table_size;
+	struct nouveau_gpuobj_ref *ctx_table;
+
+	struct list_head gpuobj_list;
+
+	struct nvbios VBIOS;
+	struct nouveau_bios_info *vbios;
+
+	struct nv04_mode_state mode_reg;
+	struct nv04_mode_state saved_reg;
+	uint32_t saved_vga_font[4][16384];
+	uint32_t crtc_owner;
+	uint32_t dac_users[4];
+
+	struct nouveau_suspend_resume {
+		uint32_t fifo_mode;
+		uint32_t graph_ctx_control;
+		uint32_t graph_state;
+		uint32_t *ramin_copy;
+		uint64_t ramin_size;
+	} susres;
+
+	struct backlight_device *backlight;
+	bool acpi_dsm;
+
+	struct nouveau_channel *evo;
+
+	struct {
+		struct dentry *channel_root;
+	} debugfs;
+};
+
+static inline struct drm_nouveau_private *
+nouveau_bdev(struct ttm_bo_device *bd)
+{
+	return container_of(bd, struct drm_nouveau_private, ttm.bdev);
+}
+
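+/* point *pnvbo at 'ref' (which may be NULL), taking a new reference and
+ * dropping the one previously held in *pnvbo */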
+static inline int
+nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
+{
+	struct nouveau_bo *prev;
+
+	if (!pnvbo)
+		return -EINVAL;
+	prev = *pnvbo;
+
+	*pnvbo = ref ? nouveau_bo(ttm_bo_reference(&ref->bo)) : NULL;
+	if (prev) {
+		struct ttm_buffer_object *bo = &prev->bo;
+
+		ttm_bo_unref(&bo);
+	}
+
+	return 0;
+}
+
+#define NOUVEAU_CHECK_INITIALISED_WITH_RETURN do {            \
+	struct drm_nouveau_private *nv = dev->dev_private;    \
+	if (nv->init_state != NOUVEAU_CARD_INIT_DONE) {       \
+		NV_ERROR(dev, "called without init\n");       \
+		return -EINVAL;                               \
+	}                                                     \
+} while (0)
+
+#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do {    \
+	struct drm_nouveau_private *nv = dev->dev_private;       \
+	if (!nouveau_channel_owner(dev, (cl), (id))) {           \
+		NV_ERROR(dev, "pid %d doesn't own channel %d\n", \
+			 DRM_CURRENTPID, (id));                  \
+		return -EPERM;                                   \
+	}                                                        \
+	(ch) = nv->fifos[(id)];                                  \
+} while (0)
+
+/* nouveau_drv.c */
+extern int nouveau_noagp;
+extern int nouveau_duallink;
+extern int nouveau_uscript_lvds;
+extern int nouveau_uscript_tmds;
+extern int nouveau_vram_pushbuf;
+extern int nouveau_vram_notify;
+extern int nouveau_fbpercrtc;
+extern char *nouveau_tv_norm;
+extern int nouveau_reg_debug;
+extern char *nouveau_vbios;
+
+/* nouveau_state.c */
+extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
+extern int  nouveau_load(struct drm_device *, unsigned long flags);
+extern int  nouveau_firstopen(struct drm_device *);
+extern void nouveau_lastclose(struct drm_device *);
+extern int  nouveau_unload(struct drm_device *);
+extern int  nouveau_ioctl_getparam(struct drm_device *, void *data,
+				   struct drm_file *);
+extern int  nouveau_ioctl_setparam(struct drm_device *, void *data,
+				   struct drm_file *);
+extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout,
+			       uint32_t reg, uint32_t mask, uint32_t val);
+extern bool nouveau_wait_for_idle(struct drm_device *);
+extern int  nouveau_card_init(struct drm_device *);
+extern int  nouveau_ioctl_card_init(struct drm_device *, void *data,
+				    struct drm_file *);
+extern int  nouveau_ioctl_suspend(struct drm_device *, void *data,
+				  struct drm_file *);
+extern int  nouveau_ioctl_resume(struct drm_device *, void *data,
+				 struct drm_file *);
+
+/* nouveau_mem.c */
+extern int  nouveau_mem_init_heap(struct mem_block **, uint64_t start,
+				 uint64_t size);
+extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *,
+						 uint64_t size, int align2,
+						 struct drm_file *, int tail);
+extern void nouveau_mem_takedown(struct mem_block **heap);
+extern void nouveau_mem_free_block(struct mem_block *);
+extern uint64_t nouveau_mem_fb_amount(struct drm_device *);
+extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
+extern int  nouveau_mem_init(struct drm_device *);
+extern int  nouveau_mem_init_agp(struct drm_device *);
+extern void nouveau_mem_close(struct drm_device *);
+extern int  nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
+				    uint32_t size, uint32_t flags,
+				    uint64_t phys);
+extern void nv50_mem_vm_unbind(struct drm_device *, uint64_t virt,
+			       uint32_t size);
+
+/* nouveau_notifier.c */
+extern int  nouveau_notifier_init_channel(struct nouveau_channel *);
+extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
+extern int  nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
+				   int count, uint32_t *offset);
+extern int  nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *);
+extern int  nouveau_ioctl_notifier_alloc(struct drm_device *, void *data,
+					 struct drm_file *);
+extern int  nouveau_ioctl_notifier_free(struct drm_device *, void *data,
+					struct drm_file *);
+
+/* nouveau_channel.c */
+extern struct drm_ioctl_desc nouveau_ioctls[];
+extern int nouveau_max_ioctl;
+extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *);
+extern int  nouveau_channel_owner(struct drm_device *, struct drm_file *,
+				  int channel);
+extern int  nouveau_channel_alloc(struct drm_device *dev,
+				  struct nouveau_channel **chan,
+				  struct drm_file *file_priv,
+				  uint32_t fb_ctxdma, uint32_t tt_ctxdma);
+extern void nouveau_channel_free(struct nouveau_channel *);
+extern int  nouveau_channel_idle(struct nouveau_channel *chan);
+
+/* nouveau_object.c */
+extern int  nouveau_gpuobj_early_init(struct drm_device *);
+extern int  nouveau_gpuobj_init(struct drm_device *);
+extern void nouveau_gpuobj_takedown(struct drm_device *);
+extern void nouveau_gpuobj_late_takedown(struct drm_device *);
+extern int  nouveau_gpuobj_suspend(struct drm_device *dev);
+extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev);
+extern void nouveau_gpuobj_resume(struct drm_device *dev);
+extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
+				       uint32_t vram_h, uint32_t tt_h);
+extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
+extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *,
+			      uint32_t size, int align, uint32_t flags,
+			      struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *,
+				  uint32_t handle, struct nouveau_gpuobj *,
+				  struct nouveau_gpuobj_ref **);
+extern int nouveau_gpuobj_ref_del(struct drm_device *,
+				  struct nouveau_gpuobj_ref **);
+extern int nouveau_gpuobj_ref_find(struct nouveau_channel *, uint32_t handle,
+				   struct nouveau_gpuobj_ref **ref_ret);
+extern int nouveau_gpuobj_new_ref(struct drm_device *,
+				  struct nouveau_channel *alloc_chan,
+				  struct nouveau_channel *ref_chan,
+				  uint32_t handle, uint32_t size, int align,
+				  uint32_t flags, struct nouveau_gpuobj_ref **);
+extern int nouveau_gpuobj_new_fake(struct drm_device *,
+				   uint32_t p_offset, uint32_t b_offset,
+				   uint32_t size, uint32_t flags,
+				   struct nouveau_gpuobj **,
+				   struct nouveau_gpuobj_ref**);
+extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
+				  uint64_t offset, uint64_t size, int access,
+				  int target, struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
+				       uint64_t offset, uint64_t size,
+				       int access, struct nouveau_gpuobj **,
+				       uint32_t *o_ret);
+extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
+				 struct nouveau_gpuobj **);
+extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
+				     struct drm_file *);
+extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
+				     struct drm_file *);
+
+/* nouveau_irq.c */
+extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
+extern void        nouveau_irq_preinstall(struct drm_device *);
+extern int         nouveau_irq_postinstall(struct drm_device *);
+extern void        nouveau_irq_uninstall(struct drm_device *);
+
+/* nouveau_sgdma.c */
+extern int nouveau_sgdma_init(struct drm_device *);
+extern void nouveau_sgdma_takedown(struct drm_device *);
+extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset,
+				  uint32_t *page);
+extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
+
+/* nouveau_debugfs.c */
+#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
+extern int  nouveau_debugfs_init(struct drm_minor *);
+extern void nouveau_debugfs_takedown(struct drm_minor *);
+extern int  nouveau_debugfs_channel_init(struct nouveau_channel *);
+extern void nouveau_debugfs_channel_fini(struct nouveau_channel *);
+#else
+static inline int
+nouveau_debugfs_init(struct drm_minor *minor)
+{
+	return 0;
+}
+
+static inline void nouveau_debugfs_takedown(struct drm_minor *minor)
+{
+}
+
+static inline int
+nouveau_debugfs_channel_init(struct nouveau_channel *chan)
+{
+	return 0;
+}
+
+static inline void
+nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
+{
+}
+#endif
+
+/* nouveau_dma.c */
+extern int  nouveau_dma_init(struct nouveau_channel *);
+extern int  nouveau_dma_wait(struct nouveau_channel *, int size);
+
+/* nouveau_acpi.c */
+#ifdef CONFIG_ACPI
+extern int nouveau_hybrid_setup(struct drm_device *dev);
+extern bool nouveau_dsm_probe(struct drm_device *dev);
+#else
+static inline int nouveau_hybrid_setup(struct drm_device *dev)
+{
+	return 0;
+}
+static inline bool nouveau_dsm_probe(struct drm_device *dev)
+{
+	return false;
+}
+#endif
+
+/* nouveau_backlight.c */
+#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
+extern int nouveau_backlight_init(struct drm_device *);
+extern void nouveau_backlight_exit(struct drm_device *);
+#else
+static inline int nouveau_backlight_init(struct drm_device *dev)
+{
+	return 0;
+}
+
+static inline void nouveau_backlight_exit(struct drm_device *dev) { }
+#endif
+
+/* nouveau_bios.c */
+extern int nouveau_bios_init(struct drm_device *);
+extern void nouveau_bios_takedown(struct drm_device *dev);
+extern int nouveau_run_vbios_init(struct drm_device *);
+extern void nouveau_bios_run_init_table(struct drm_device *, uint16_t table,
+					struct dcb_entry *);
+extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *,
+						      enum dcb_gpio_tag);
+extern struct dcb_connector_table_entry *
+nouveau_bios_connector_entry(struct drm_device *, int index);
+extern int get_pll_limits(struct drm_device *, uint32_t limit_match,
+			  struct pll_lims *);
+extern int nouveau_bios_run_display_table(struct drm_device *,
+					  struct dcb_entry *,
+					  uint32_t script, int pxclk);
+extern void *nouveau_bios_dp_table(struct drm_device *, struct dcb_entry *,
+				   int *length);
+extern bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
+extern uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
+extern int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
+					 bool *dl, bool *if_is_24bit);
+extern int run_tmds_table(struct drm_device *, struct dcb_entry *,
+			  int head, int pxclk);
+extern int call_lvds_script(struct drm_device *, struct dcb_entry *, int head,
+			    enum LVDS_script, int pxclk);
+
+/* nouveau_ttm.c */
+int nouveau_ttm_global_init(struct drm_nouveau_private *);
+void nouveau_ttm_global_release(struct drm_nouveau_private *);
+int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
+
+/* nouveau_dp.c */
+int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
+		     uint8_t *data, int data_nr);
+bool nouveau_dp_detect(struct drm_encoder *);
+bool nouveau_dp_link_train(struct drm_encoder *);
+
+/* nv04_fb.c */
+extern int  nv04_fb_init(struct drm_device *);
+extern void nv04_fb_takedown(struct drm_device *);
+
+/* nv10_fb.c */
+extern int  nv10_fb_init(struct drm_device *);
+extern void nv10_fb_takedown(struct drm_device *);
+
+/* nv40_fb.c */
+extern int  nv40_fb_init(struct drm_device *);
+extern void nv40_fb_takedown(struct drm_device *);
+
+/* nv04_fifo.c */
+extern int  nv04_fifo_init(struct drm_device *);
+extern void nv04_fifo_disable(struct drm_device *);
+extern void nv04_fifo_enable(struct drm_device *);
+extern bool nv04_fifo_reassign(struct drm_device *, bool);
+extern int  nv04_fifo_channel_id(struct drm_device *);
+extern int  nv04_fifo_create_context(struct nouveau_channel *);
+extern void nv04_fifo_destroy_context(struct nouveau_channel *);
+extern int  nv04_fifo_load_context(struct nouveau_channel *);
+extern int  nv04_fifo_unload_context(struct drm_device *);
+
+/* nv10_fifo.c */
+extern int  nv10_fifo_init(struct drm_device *);
+extern int  nv10_fifo_channel_id(struct drm_device *);
+extern int  nv10_fifo_create_context(struct nouveau_channel *);
+extern void nv10_fifo_destroy_context(struct nouveau_channel *);
+extern int  nv10_fifo_load_context(struct nouveau_channel *);
+extern int  nv10_fifo_unload_context(struct drm_device *);
+
+/* nv40_fifo.c */
+extern int  nv40_fifo_init(struct drm_device *);
+extern int  nv40_fifo_create_context(struct nouveau_channel *);
+extern void nv40_fifo_destroy_context(struct nouveau_channel *);
+extern int  nv40_fifo_load_context(struct nouveau_channel *);
+extern int  nv40_fifo_unload_context(struct drm_device *);
+
+/* nv50_fifo.c */
+extern int  nv50_fifo_init(struct drm_device *);
+extern void nv50_fifo_takedown(struct drm_device *);
+extern int  nv50_fifo_channel_id(struct drm_device *);
+extern int  nv50_fifo_create_context(struct nouveau_channel *);
+extern void nv50_fifo_destroy_context(struct nouveau_channel *);
+extern int  nv50_fifo_load_context(struct nouveau_channel *);
+extern int  nv50_fifo_unload_context(struct drm_device *);
+
+/* nv04_graph.c */
+extern struct nouveau_pgraph_object_class nv04_graph_grclass[];
+extern int  nv04_graph_init(struct drm_device *);
+extern void nv04_graph_takedown(struct drm_device *);
+extern void nv04_graph_fifo_access(struct drm_device *, bool);
+extern struct nouveau_channel *nv04_graph_channel(struct drm_device *);
+extern int  nv04_graph_create_context(struct nouveau_channel *);
+extern void nv04_graph_destroy_context(struct nouveau_channel *);
+extern int  nv04_graph_load_context(struct nouveau_channel *);
+extern int  nv04_graph_unload_context(struct drm_device *);
+extern void nv04_graph_context_switch(struct drm_device *);
+
+/* nv10_graph.c */
+extern struct nouveau_pgraph_object_class nv10_graph_grclass[];
+extern int  nv10_graph_init(struct drm_device *);
+extern void nv10_graph_takedown(struct drm_device *);
+extern struct nouveau_channel *nv10_graph_channel(struct drm_device *);
+extern int  nv10_graph_create_context(struct nouveau_channel *);
+extern void nv10_graph_destroy_context(struct nouveau_channel *);
+extern int  nv10_graph_load_context(struct nouveau_channel *);
+extern int  nv10_graph_unload_context(struct drm_device *);
+extern void nv10_graph_context_switch(struct drm_device *);
+
+/* nv20_graph.c */
+extern struct nouveau_pgraph_object_class nv20_graph_grclass[];
+extern struct nouveau_pgraph_object_class nv30_graph_grclass[];
+extern int  nv20_graph_create_context(struct nouveau_channel *);
+extern void nv20_graph_destroy_context(struct nouveau_channel *);
+extern int  nv20_graph_load_context(struct nouveau_channel *);
+extern int  nv20_graph_unload_context(struct drm_device *);
+extern int  nv20_graph_init(struct drm_device *);
+extern void nv20_graph_takedown(struct drm_device *);
+extern int  nv30_graph_init(struct drm_device *);
+
+/* nv40_graph.c */
+extern struct nouveau_pgraph_object_class nv40_graph_grclass[];
+extern int  nv40_graph_init(struct drm_device *);
+extern void nv40_graph_takedown(struct drm_device *);
+extern struct nouveau_channel *nv40_graph_channel(struct drm_device *);
+extern int  nv40_graph_create_context(struct nouveau_channel *);
+extern void nv40_graph_destroy_context(struct nouveau_channel *);
+extern int  nv40_graph_load_context(struct nouveau_channel *);
+extern int  nv40_graph_unload_context(struct drm_device *);
+extern int  nv40_grctx_init(struct drm_device *);
+extern void nv40_grctx_fini(struct drm_device *);
+extern void nv40_grctx_vals_load(struct drm_device *, struct nouveau_gpuobj *);
+
+/* nv50_graph.c */
+extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
+extern int  nv50_graph_init(struct drm_device *);
+extern void nv50_graph_takedown(struct drm_device *);
+extern void nv50_graph_fifo_access(struct drm_device *, bool);
+extern struct nouveau_channel *nv50_graph_channel(struct drm_device *);
+extern int  nv50_graph_create_context(struct nouveau_channel *);
+extern void nv50_graph_destroy_context(struct nouveau_channel *);
+extern int  nv50_graph_load_context(struct nouveau_channel *);
+extern int  nv50_graph_unload_context(struct drm_device *);
+extern void nv50_graph_context_switch(struct drm_device *);
+
+/* nv04_instmem.c */
+extern int  nv04_instmem_init(struct drm_device *);
+extern void nv04_instmem_takedown(struct drm_device *);
+extern int  nv04_instmem_suspend(struct drm_device *);
+extern void nv04_instmem_resume(struct drm_device *);
+extern int  nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
+				  uint32_t *size);
+extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
+extern int  nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
+extern int  nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern void nv04_instmem_prepare_access(struct drm_device *, bool write);
+extern void nv04_instmem_finish_access(struct drm_device *);
+
+/* nv50_instmem.c */
+extern int  nv50_instmem_init(struct drm_device *);
+extern void nv50_instmem_takedown(struct drm_device *);
+extern int  nv50_instmem_suspend(struct drm_device *);
+extern void nv50_instmem_resume(struct drm_device *);
+extern int  nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
+				  uint32_t *size);
+extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
+extern int  nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
+extern int  nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern void nv50_instmem_prepare_access(struct drm_device *, bool write);
+extern void nv50_instmem_finish_access(struct drm_device *);
+
+/* nv04_mc.c */
+extern int  nv04_mc_init(struct drm_device *);
+extern void nv04_mc_takedown(struct drm_device *);
+
+/* nv40_mc.c */
+extern int  nv40_mc_init(struct drm_device *);
+extern void nv40_mc_takedown(struct drm_device *);
+
+/* nv50_mc.c */
+extern int  nv50_mc_init(struct drm_device *);
+extern void nv50_mc_takedown(struct drm_device *);
+
+/* nv04_timer.c */
+extern int  nv04_timer_init(struct drm_device *);
+extern uint64_t nv04_timer_read(struct drm_device *);
+extern void nv04_timer_takedown(struct drm_device *);
+
+extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
+				 unsigned long arg);
+
+/* nv04_dac.c */
+extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry);
+extern enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
+						 struct drm_connector *connector);
+extern int nv04_dac_output_offset(struct drm_encoder *encoder);
+extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable);
+
+/* nv04_dfp.c */
+extern int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry);
+extern int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent);
+extern void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent,
+			       int head, bool dl);
+extern void nv04_dfp_disable(struct drm_device *dev, int head);
+extern void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode);
+
+/* nv04_tv.c */
+extern int nv04_tv_identify(struct drm_device *dev, int i2c_index);
+extern int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry);
+
+/* nv17_tv.c */
+extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry);
+extern enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
+						struct drm_connector *connector,
+						uint32_t pin_mask);
+
+/* nv04_display.c */
+extern int nv04_display_create(struct drm_device *);
+extern void nv04_display_destroy(struct drm_device *);
+extern void nv04_display_restore(struct drm_device *);
+
+/* nv04_crtc.c */
+extern int nv04_crtc_create(struct drm_device *, int index);
+
+/* nouveau_bo.c */
+extern struct ttm_bo_driver nouveau_bo_driver;
+extern int nouveau_bo_new(struct drm_device *, struct nouveau_channel *,
+			  int size, int align, uint32_t flags,
+			  uint32_t tile_mode, uint32_t tile_flags,
+			  bool no_vm, bool mappable, struct nouveau_bo **);
+extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
+extern int nouveau_bo_unpin(struct nouveau_bo *);
+extern int nouveau_bo_map(struct nouveau_bo *);
+extern void nouveau_bo_unmap(struct nouveau_bo *);
+extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t memtype);
+extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
+extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
+extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
+extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
+
+/* nouveau_fence.c */
+struct nouveau_fence;
+extern int nouveau_fence_init(struct nouveau_channel *);
+extern void nouveau_fence_fini(struct nouveau_channel *);
+extern void nouveau_fence_update(struct nouveau_channel *);
+extern int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **,
+			     bool emit);
+extern int nouveau_fence_emit(struct nouveau_fence *);
+struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
+extern bool nouveau_fence_signalled(void *obj, void *arg);
+extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
+extern int nouveau_fence_flush(void *obj, void *arg);
+extern void nouveau_fence_unref(void **obj);
+extern void *nouveau_fence_ref(void *obj);
+extern void nouveau_fence_handler(struct drm_device *dev, int channel);
+
+/* nouveau_gem.c */
+extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
+			   int size, int align, uint32_t flags,
+			   uint32_t tile_mode, uint32_t tile_flags,
+			   bool no_vm, bool mappable, struct nouveau_bo **);
+extern int nouveau_gem_object_new(struct drm_gem_object *);
+extern void nouveau_gem_object_del(struct drm_gem_object *);
+extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
+				 struct drm_file *);
+extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
+				     struct drm_file *);
+extern int nouveau_gem_ioctl_pushbuf_call(struct drm_device *, void *,
+					  struct drm_file *);
+extern int nouveau_gem_ioctl_pushbuf_call2(struct drm_device *, void *,
+					   struct drm_file *);
+extern int nouveau_gem_ioctl_pin(struct drm_device *, void *,
+				 struct drm_file *);
+extern int nouveau_gem_ioctl_unpin(struct drm_device *, void *,
+				   struct drm_file *);
+extern int nouveau_gem_ioctl_tile(struct drm_device *, void *,
+				  struct drm_file *);
+extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *,
+				      struct drm_file *);
+extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
+				      struct drm_file *);
+extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
+				  struct drm_file *);
+
+/* nv17_gpio.c */
+int nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
+int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
+
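+/* The *_native io accessors read and write in the CPU's native byte
+ * order: the plain io* helpers on little-endian hosts and the io*be
+ * variants on big-endian ones, unless the platform already provides
+ * its own ioread32_native.
+ */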
+#ifndef ioread32_native
+#ifdef __BIG_ENDIAN
+#define ioread16_native ioread16be
+#define iowrite16_native iowrite16be
+#define ioread32_native  ioread32be
+#define iowrite32_native iowrite32be
+#else /* def __BIG_ENDIAN */
+#define ioread16_native ioread16
+#define iowrite16_native iowrite16
+#define ioread32_native  ioread32
+#define iowrite32_native iowrite32
+#endif /* def __BIG_ENDIAN else */
+#endif /* !ioread32_native */
+
+/* channel control reg access */
+static inline u32 nvchan_rd32(struct nouveau_channel *chan, unsigned reg)
+{
+	return ioread32_native(chan->user + reg);
+}
+
+static inline void nvchan_wr32(struct nouveau_channel *chan,
+			       unsigned reg, u32 val)
+{
+	iowrite32_native(val, chan->user + reg);
+}
+
+/* register access */
+static inline u32 nv_rd32(struct drm_device *dev, unsigned reg)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	return ioread32_native(dev_priv->mmio + reg);
+}
+
+static inline void nv_wr32(struct drm_device *dev, unsigned reg, u32 val)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	iowrite32_native(val, dev_priv->mmio + reg);
+}
+
+static inline u8 nv_rd08(struct drm_device *dev, unsigned reg)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	return ioread8(dev_priv->mmio + reg);
+}
+
+static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	iowrite8(val, dev_priv->mmio + reg);
+}
+
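+/* Poll until (nv_rd32(dev, reg) & mask) == val, giving up after
+ * 2000000000ULL units (presumably nanoseconds, i.e. two seconds).
+ * Relies on a 'dev' variable being in scope at the call site.
+ */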
+#define nv_wait(reg, mask, val) \
+	nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val))
+
+/* PRAMIN access */
+static inline u32 nv_ri32(struct drm_device *dev, unsigned offset)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	return ioread32_native(dev_priv->ramin + offset);
+}
+
+static inline void nv_wi32(struct drm_device *dev, unsigned offset, u32 val)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	iowrite32_native(val, dev_priv->ramin + offset);
+}
+
+/* object access */
+static inline u32 nv_ro32(struct drm_device *dev, struct nouveau_gpuobj *obj,
+				unsigned index)
+{
+	return nv_ri32(dev, obj->im_pramin->start + index * 4);
+}
+
+static inline void nv_wo32(struct drm_device *dev, struct nouveau_gpuobj *obj,
+				unsigned index, u32 val)
+{
+	nv_wi32(dev, obj->im_pramin->start + index * 4, val);
+}
+
+/*
+ * Logging
+ * Argument d is (struct drm_device *).
+ */
+#define NV_PRINTK(level, d, fmt, arg...) \
+	printk(level "[" DRM_NAME "] " DRIVER_NAME " %s: " fmt, \
+					pci_name(d->pdev), ##arg)
+#ifndef NV_DEBUG_NOTRACE
+#define NV_DEBUG(d, fmt, arg...) do {                                          \
+	if (drm_debug) {                                                       \
+		NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__,             \
+			  __LINE__, ##arg);                                    \
+	}                                                                      \
+} while (0)
+#else
+#define NV_DEBUG(d, fmt, arg...) do {                                          \
+	if (drm_debug)                                                         \
+		NV_PRINTK(KERN_DEBUG, d, fmt, ##arg);                          \
+} while (0)
+#endif
+#define NV_ERROR(d, fmt, arg...) NV_PRINTK(KERN_ERR, d, fmt, ##arg)
+#define NV_INFO(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
+#define NV_TRACEWARN(d, fmt, arg...) NV_PRINTK(KERN_NOTICE, d, fmt, ##arg)
+#define NV_TRACE(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
+#define NV_WARN(d, fmt, arg...) NV_PRINTK(KERN_WARNING, d, fmt, ##arg)
+
+/* nouveau_reg_debug bitmask */
+enum {
+	NOUVEAU_REG_DEBUG_MC             = 0x1,
+	NOUVEAU_REG_DEBUG_VIDEO          = 0x2,
+	NOUVEAU_REG_DEBUG_FB             = 0x4,
+	NOUVEAU_REG_DEBUG_EXTDEV         = 0x8,
+	NOUVEAU_REG_DEBUG_CRTC           = 0x10,
+	NOUVEAU_REG_DEBUG_RAMDAC         = 0x20,
+	NOUVEAU_REG_DEBUG_VGACRTC        = 0x40,
+	NOUVEAU_REG_DEBUG_RMVIO          = 0x80,
+	NOUVEAU_REG_DEBUG_VGAATTR        = 0x100,
+	NOUVEAU_REG_DEBUG_EVO            = 0x200,
+};
+
+#define NV_REG_DEBUG(type, dev, fmt, arg...) do { \
+	if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_##type) \
+		NV_PRINTK(KERN_DEBUG, dev, "%s: " fmt, __func__, ##arg); \
+} while (0)
+
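+/* True on every NV10+ chipset except the single-head variants matched
+ * by implementation ID below (0x010x, 0x015x, 0x01ax and 0x020x).
+ */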
+static inline bool
+nv_two_heads(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	const int impl = dev->pci_device & 0x0ff0;
+
+	if (dev_priv->card_type >= NV_10 && impl != 0x0100 &&
+	    impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
+		return true;
+
+	return false;
+}
+
+static inline bool
+nv_gf4_disp_arch(struct drm_device *dev)
+{
+	return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110;
+}
+
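+/* Cards that spread PLL programming across two registers: device IDs
+ * 0x031x and 0x034x, plus everything from NV40 onwards.
+ */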
+static inline bool
+nv_two_reg_pll(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	const int impl = dev->pci_device & 0x0ff0;
+
+	if (impl == 0x0310 || impl == 0x0340 || dev_priv->card_type >= NV_40)
+		return true;
+	return false;
+}
+
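+/* Methods of the NV50 software object class (0x506e), used for
+ * semaphore and vblank-semaphore synchronisation.
+ */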
+#define NV50_NVSW                                                    0x0000506e
+#define NV50_NVSW_DMA_SEMAPHORE                                      0x00000060
+#define NV50_NVSW_SEMAPHORE_OFFSET                                   0x00000064
+#define NV50_NVSW_SEMAPHORE_ACQUIRE                                  0x00000068
+#define NV50_NVSW_SEMAPHORE_RELEASE                                  0x0000006c
+#define NV50_NVSW_DMA_VBLSEM                                         0x0000018c
+#define NV50_NVSW_VBLSEM_OFFSET                                      0x00000400
+#define NV50_NVSW_VBLSEM_RELEASE_VALUE                               0x00000404
+#define NV50_NVSW_VBLSEM_RELEASE                                     0x00000408
+
+#endif /* __NOUVEAU_DRV_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
new file mode 100644
index 0000000..bc4a240
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_ENCODER_H__
+#define __NOUVEAU_ENCODER_H__
+
+#include "drm_encoder_slave.h"
+#include "nouveau_drv.h"
+
+#define NV_DPMS_CLEARED 0x80
+
+struct nouveau_encoder {
+	struct drm_encoder_slave base;
+
+	struct dcb_entry *dcb;
+	int or;
+
+	struct drm_display_mode mode;
+	int last_dpms;
+
+	struct nv04_output_reg restore;
+
+	void (*disconnect)(struct nouveau_encoder *encoder);
+
+	union {
+		struct {
+			int dpcd_version;
+			int link_nr;
+			int link_bw;
+		} dp;
+	};
+};
+
+static inline struct nouveau_encoder *nouveau_encoder(struct drm_encoder *enc)
+{
+	struct drm_encoder_slave *slave = to_encoder_slave(enc);
+
+	return container_of(slave, struct nouveau_encoder, base);
+}
+
+static inline struct drm_encoder *to_drm_encoder(struct nouveau_encoder *enc)
+{
+	return &enc->base.base;
+}
+
+struct nouveau_connector *
+nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
+int nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry);
+int nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry);
+
+struct bit_displayport_encoder_table {
+	uint32_t match;
+	uint8_t  record_nr;
+	uint8_t  unknown;
+	uint16_t script0;
+	uint16_t script1;
+	uint16_t unknown_table;
+} __attribute__ ((packed));
+
+struct bit_displayport_encoder_table_entry {
+	uint8_t vs_level;
+	uint8_t pre_level;
+	uint8_t reg0;
+	uint8_t reg1;
+	uint8_t reg2;
+} __attribute__ ((packed));
+
+#endif /* __NOUVEAU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
new file mode 100644
index 0000000..4a3f31a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_FB_H__
+#define __NOUVEAU_FB_H__
+
+struct nouveau_framebuffer {
+	struct drm_framebuffer base;
+	struct nouveau_bo *nvbo;
+};
+
+static inline struct nouveau_framebuffer *
+nouveau_framebuffer(struct drm_framebuffer *fb)
+{
+	return container_of(fb, struct nouveau_framebuffer, base);
+}
+
+extern const struct drm_mode_config_funcs nouveau_mode_config_funcs;
+
+struct drm_framebuffer *
+nouveau_framebuffer_create(struct drm_device *, struct nouveau_bo *,
+			   struct drm_mode_fb_cmd *);
+
+#endif /* __NOUVEAU_FB_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
new file mode 100644
index 0000000..36e8c5e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -0,0 +1,380 @@
+/*
+ * Copyright © 2007 David Airlie
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     David Airlie
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/screen_info.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_crtc.h"
+#include "nouveau_fb.h"
+#include "nouveau_fbcon.h"
+#include "nouveau_dma.h"
+
+static int
+nouveau_fbcon_sync(struct fb_info *info)
+{
+	struct nouveau_fbcon_par *par = info->par;
+	struct drm_device *dev = par->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->channel;
+	int ret, i;
+
+	if (!chan->accel_done ||
+	    info->state != FBINFO_STATE_RUNNING ||
+	    info->flags & FBINFO_HWACCEL_DISABLED)
+		return 0;
+
+	if (RING_SPACE(chan, 4)) {
+		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+		info->flags |= FBINFO_HWACCEL_DISABLED;
+		return 0;
+	}
+
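+	/* Emit a NOTIFY (0x0104) followed by a NOP (0x0100) on
+	 * subchannel 0, arm the notifier dword with a non-zero value,
+	 * then busy-wait (1us steps, up to ~100ms) for the GPU to
+	 * clear it, signalling that all prior commands completed.
+	 */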
+	BEGIN_RING(chan, 0, 0x0104, 1);
+	OUT_RING(chan, 0);
+	BEGIN_RING(chan, 0, 0x0100, 1);
+	OUT_RING(chan, 0);
+	nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
+	FIRE_RING(chan);
+
+	ret = -EBUSY;
+	for (i = 0; i < 100000; i++) {
+		if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy + 3)) {
+			ret = 0;
+			break;
+		}
+		DRM_UDELAY(1);
+	}
+
+	if (ret) {
+		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+		info->flags |= FBINFO_HWACCEL_DISABLED;
+		return 0;
+	}
+
+	chan->accel_done = false;
+	return 0;
+}
+
+static struct fb_ops nouveau_fbcon_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_setcolreg = drm_fb_helper_setcolreg,
+	.fb_fillrect = cfb_fillrect,
+	.fb_copyarea = cfb_copyarea,
+	.fb_imageblit = cfb_imageblit,
+	.fb_sync = nouveau_fbcon_sync,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+				    u16 blue, int regno)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+	nv_crtc->lut.r[regno] = red;
+	nv_crtc->lut.g[regno] = green;
+	nv_crtc->lut.b[regno] = blue;
+}
+
+static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+				    u16 *blue, int regno)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+	*red = nv_crtc->lut.r[regno];
+	*green = nv_crtc->lut.g[regno];
+	*blue = nv_crtc->lut.b[regno];
+}
+
+static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
+	.gamma_set = nouveau_fbcon_gamma_set,
+	.gamma_get = nouveau_fbcon_gamma_get
+};
+
+#if defined(__i386__) || defined(__x86_64__)
+static bool
+nouveau_fbcon_has_vesafb_or_efifb(struct drm_device *dev)
+{
+	struct pci_dev *pdev = dev->pdev;
+	int ramin;
+
+	if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB &&
+	    screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+		return false;
+
+	if (screen_info.lfb_base < pci_resource_start(pdev, 1))
+		goto not_fb;
+
+	if (screen_info.lfb_base + screen_info.lfb_size >=
+	    pci_resource_start(pdev, 1) + pci_resource_len(pdev, 1))
+		goto not_fb;
+
+	return true;
+not_fb:
+	ramin = 2;
+	if (pci_resource_len(pdev, ramin) == 0) {
+		ramin = 3;
+		if (pci_resource_len(pdev, ramin) == 0)
+			return false;
+	}
+
+	if (screen_info.lfb_base < pci_resource_start(pdev, ramin))
+		return false;
+
+	if (screen_info.lfb_base + screen_info.lfb_size >=
+	    pci_resource_start(pdev, ramin) + pci_resource_len(pdev, ramin))
+		return false;
+
+	return true;
+}
+#endif
+
+void
+nouveau_fbcon_zfill(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct fb_info *info = dev_priv->fbdev_info;
+	struct fb_fillrect rect;
+
+	/* Clear the entire fbcon.  The drm will program every connector
+	 * with its preferred mode.  If the sizes differ, one display will
+	 * quite likely have garbage around the console.
+	 */
+	rect.dx = rect.dy = 0;
+	rect.width = info->var.xres_virtual;
+	rect.height = info->var.yres_virtual;
+	rect.color = 0;
+	rect.rop = ROP_COPY;
+	info->fbops->fb_fillrect(info, &rect);
+}
+
+static int
+nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
+		     uint32_t fb_height, uint32_t surface_width,
+		     uint32_t surface_height, uint32_t surface_depth,
+		     uint32_t surface_bpp, struct drm_framebuffer **pfb)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct fb_info *info;
+	struct nouveau_fbcon_par *par;
+	struct drm_framebuffer *fb;
+	struct nouveau_framebuffer *nouveau_fb;
+	struct nouveau_bo *nvbo;
+	struct drm_mode_fb_cmd mode_cmd;
+	struct device *device = &dev->pdev->dev;
+	int size, ret;
+
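+	/* Describe the backing surface: pitch aligned to 256 bytes and
+	 * the total size rounded up to a whole page before allocating
+	 * the buffer object in VRAM.
+	 */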
+	mode_cmd.width = surface_width;
+	mode_cmd.height = surface_height;
+
+	mode_cmd.bpp = surface_bpp;
+	mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
+	mode_cmd.pitch = ALIGN(mode_cmd.pitch, 256);
+	mode_cmd.depth = surface_depth;
+
+	size = mode_cmd.pitch * mode_cmd.height;
+	size = ALIGN(size, PAGE_SIZE);
+
+	ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
+			      0, 0x0000, false, true, &nvbo);
+	if (ret) {
+		NV_ERROR(dev, "failed to allocate framebuffer\n");
+		goto out;
+	}
+
+	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
+	if (ret) {
+		NV_ERROR(dev, "failed to pin fb: %d\n", ret);
+		nouveau_bo_ref(NULL, &nvbo);
+		goto out;
+	}
+
+	ret = nouveau_bo_map(nvbo);
+	if (ret) {
+		NV_ERROR(dev, "failed to map fb: %d\n", ret);
+		nouveau_bo_unpin(nvbo);
+		nouveau_bo_ref(NULL, &nvbo);
+		goto out;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
+	fb = nouveau_framebuffer_create(dev, nvbo, &mode_cmd);
+	if (!fb) {
+		ret = -ENOMEM;
+		NV_ERROR(dev, "failed to allocate fb.\n");
+		goto out_unref;
+	}
+
+	list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
+
+	nouveau_fb = nouveau_framebuffer(fb);
+	*pfb = fb;
+
+	info = framebuffer_alloc(sizeof(struct nouveau_fbcon_par), device);
+	if (!info) {
+		ret = -ENOMEM;
+		goto out_unref;
+	}
+
+	par = info->par;
+	par->helper.funcs = &nouveau_fbcon_helper_funcs;
+	par->helper.dev = dev;
+	ret = drm_fb_helper_init_crtc_count(&par->helper, 2, 4);
+	if (ret)
+		goto out_unref;
+	dev_priv->fbdev_info = info;
+
+	strcpy(info->fix.id, "nouveaufb");
+	info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
+		      FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT;
+	info->fbops = &nouveau_fbcon_ops;
+	info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
+			       dev_priv->vm_vram_base;
+	info->fix.smem_len = size;
+
+	info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
+	info->screen_size = size;
+
+	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+	drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
+
+	/* FIXME: we really shouldn't expose mmio space at all */
+	info->fix.mmio_start = pci_resource_start(dev->pdev, 1);
+	info->fix.mmio_len = pci_resource_len(dev->pdev, 1);
+
+	/* Set aperture base/size for vesafb takeover */
+#if defined(__i386__) || defined(__x86_64__)
+	if (nouveau_fbcon_has_vesafb_or_efifb(dev)) {
+		/* Some NVIDIA VBIOSes put the framebuffer in the middle
+		 * of the PRAMIN BAR for whatever reason.  We need to
+		 * know the exact lfb_base to get vesafb kicked off, and
+		 * the only reliable way left is to find lfb_base the
+		 * same way vesafb did.
+		 */
+		info->aperture_base = screen_info.lfb_base;
+		info->aperture_size = screen_info.lfb_size;
+		if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB)
+			info->aperture_size *= 65536;
+	} else
+#endif
+	{
+		info->aperture_base = info->fix.mmio_start;
+		info->aperture_size = info->fix.mmio_len;
+	}
+
+	info->pixmap.size = 64*1024;
+	info->pixmap.buf_align = 8;
+	info->pixmap.access_align = 32;
+	info->pixmap.flags = FB_PIXMAP_SYSTEM;
+	info->pixmap.scan_align = 1;
+
+	fb->fbdev = info;
+
+	par->nouveau_fb = nouveau_fb;
+	par->dev = dev;
+
+	switch (dev_priv->card_type) {
+	case NV_50:
+		nv50_fbcon_accel_init(info);
+		break;
+	default:
+		nv04_fbcon_accel_init(info);
+		break;
+	}
+
+	nouveau_fbcon_zfill(dev);
+
+	/* To allow resizing without swapping buffers */
+	NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n",
+						nouveau_fb->base.width,
+						nouveau_fb->base.height,
+						nvbo->bo.offset, nvbo);
+
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+
+out_unref:
+	mutex_unlock(&dev->struct_mutex);
+out:
+	return ret;
+}
+
+int
+nouveau_fbcon_probe(struct drm_device *dev)
+{
+	NV_DEBUG(dev, "\n");
+
+	return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create);
+}
+
+int
+nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+{
+	struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fb);
+	struct fb_info *info;
+
+	if (!fb)
+		return -EINVAL;
+
+	info = fb->fbdev;
+	if (info) {
+		struct nouveau_fbcon_par *par = info->par;
+
+		unregister_framebuffer(info);
+		nouveau_bo_unmap(nouveau_fb->nvbo);
+		mutex_lock(&dev->struct_mutex);
+		drm_gem_object_unreference(nouveau_fb->nvbo->gem);
+		nouveau_fb->nvbo = NULL;
+		mutex_unlock(&dev->struct_mutex);
+		if (par)
+			drm_fb_helper_free(&par->helper);
+		framebuffer_release(info);
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
new file mode 100644
index 0000000..8531140
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_FBCON_H__
+#define __NOUVEAU_FBCON_H__
+
+#include "drm_fb_helper.h"
+
+struct nouveau_fbcon_par {
+	struct drm_fb_helper helper;
+	struct drm_device *dev;
+	struct nouveau_framebuffer *nouveau_fb;
+};
+
+int nouveau_fbcon_probe(struct drm_device *dev);
+int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
+void nouveau_fbcon_restore(void);
+void nouveau_fbcon_zfill(struct drm_device *dev);
+
+int nv04_fbcon_accel_init(struct fb_info *info);
+int nv50_fbcon_accel_init(struct fb_info *info);
+
+#endif /* __NOUVEAU_FBCON_H__ */
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
new file mode 100644
index 0000000..0cff7eb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -0,0 +1,262 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+
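+/* NV10 and later expose a per-channel reference counter (read from
+ * offset 0x48 of the channel's control area); older chips fall back
+ * to the last sequence number recorded by the IRQ handler.
+ */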
+#define USE_REFCNT (dev_priv->card_type >= NV_10)
+
+struct nouveau_fence {
+	struct nouveau_channel *channel;
+	struct kref refcount;
+	struct list_head entry;
+
+	uint32_t sequence;
+	bool signalled;
+};
+
+static inline struct nouveau_fence *
+nouveau_fence(void *sync_obj)
+{
+	return (struct nouveau_fence *)sync_obj;
+}
+
+static void
+nouveau_fence_del(struct kref *ref)
+{
+	struct nouveau_fence *fence =
+		container_of(ref, struct nouveau_fence, refcount);
+
+	kfree(fence);
+}
+
+void
+nouveau_fence_update(struct nouveau_channel *chan)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct list_head *entry, *tmp;
+	struct nouveau_fence *fence;
+	uint32_t sequence;
+
+	if (USE_REFCNT)
+		sequence = nvchan_rd32(chan, 0x48);
+	else
+		sequence = chan->fence.last_sequence_irq;
+
+	if (chan->fence.sequence_ack == sequence)
+		return;
+	chan->fence.sequence_ack = sequence;
+
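+	/* Retire every pending fence up to and including the one whose
+	 * sequence number was just acknowledged.
+	 */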
+	list_for_each_safe(entry, tmp, &chan->fence.pending) {
+		fence = list_entry(entry, struct nouveau_fence, entry);
+
+		sequence = fence->sequence;
+		fence->signalled = true;
+		list_del(&fence->entry);
+		kref_put(&fence->refcount, nouveau_fence_del);
+
+		if (sequence == chan->fence.sequence_ack)
+			break;
+	}
+}
+
+int
+nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
+		  bool emit)
+{
+	struct nouveau_fence *fence;
+	int ret = 0;
+
+	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+	if (!fence)
+		return -ENOMEM;
+	kref_init(&fence->refcount);
+	fence->channel = chan;
+
+	if (emit)
+		ret = nouveau_fence_emit(fence);
+
+	if (ret)
+		nouveau_fence_unref((void *)&fence);
+	*pfence = fence;
+	return ret;
+}
+
+struct nouveau_channel *
+nouveau_fence_channel(struct nouveau_fence *fence)
+{
+	return fence ? fence->channel : NULL;
+}
+
+int
+nouveau_fence_emit(struct nouveau_fence *fence)
+{
+	struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private;
+	struct nouveau_channel *chan = fence->channel;
+	unsigned long flags;
+	int ret;
+
+	ret = RING_SPACE(chan, 2);
+	if (ret)
+		return ret;
+
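+	/* The sequence space is exhausted when the next sequence would
+	 * collide with the last acknowledged one; try retiring
+	 * completed fences before declaring a bug.
+	 */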
+	if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
+		spin_lock_irqsave(&chan->fence.lock, flags);
+		nouveau_fence_update(chan);
+		spin_unlock_irqrestore(&chan->fence.lock, flags);
+
+		BUG_ON(chan->fence.sequence ==
+		       chan->fence.sequence_ack - 1);
+	}
+
+	fence->sequence = ++chan->fence.sequence;
+
+	kref_get(&fence->refcount);
+	spin_lock_irqsave(&chan->fence.lock, flags);
+	list_add_tail(&fence->entry, &chan->fence.pending);
+	spin_unlock_irqrestore(&chan->fence.lock, flags);
+
+	BEGIN_RING(chan, NvSubM2MF, USE_REFCNT ? 0x0050 : 0x0150, 1);
+	OUT_RING(chan, fence->sequence);
+	FIRE_RING(chan);
+
+	return 0;
+}
+
+void
+nouveau_fence_unref(void **sync_obj)
+{
+	struct nouveau_fence *fence = nouveau_fence(*sync_obj);
+
+	if (fence)
+		kref_put(&fence->refcount, nouveau_fence_del);
+	*sync_obj = NULL;
+}
+
+void *
+nouveau_fence_ref(void *sync_obj)
+{
+	struct nouveau_fence *fence = nouveau_fence(sync_obj);
+
+	kref_get(&fence->refcount);
+	return sync_obj;
+}
+
+bool
+nouveau_fence_signalled(void *sync_obj, void *sync_arg)
+{
+	struct nouveau_fence *fence = nouveau_fence(sync_obj);
+	struct nouveau_channel *chan = fence->channel;
+	unsigned long flags;
+
+	if (fence->signalled)
+		return true;
+
+	spin_lock_irqsave(&chan->fence.lock, flags);
+	nouveau_fence_update(chan);
+	spin_unlock_irqrestore(&chan->fence.lock, flags);
+	return fence->signalled;
+}
+
+int
+nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+{
+	unsigned long timeout = jiffies + (3 * DRM_HZ);
+	int ret = 0;
+
+	while (1) {
+		/* Re-arm the task state every iteration: a wakeup
+		 * leaves us TASK_RUNNING, and schedule_timeout() would
+		 * then return immediately, turning the lazy wait into
+		 * a busy loop.
+		 */
+		__set_current_state(intr ? TASK_INTERRUPTIBLE
+					 : TASK_UNINTERRUPTIBLE);
+
+		if (nouveau_fence_signalled(sync_obj, sync_arg))
+			break;
+
+		if (time_after_eq(jiffies, timeout)) {
+			ret = -EBUSY;
+			break;
+		}
+
+		if (lazy)
+			schedule_timeout(1);
+
+		if (intr && signal_pending(current)) {
+			ret = -ERESTART;
+			break;
+		}
+	}
+
+	__set_current_state(TASK_RUNNING);
+
+	return ret;
+}
+
+int
+nouveau_fence_flush(void *sync_obj, void *sync_arg)
+{
+	return 0;
+}
+
+void
+nouveau_fence_handler(struct drm_device *dev, int channel)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = NULL;
+
+	if (channel >= 0 && channel < dev_priv->engine.fifo.channels)
+		chan = dev_priv->fifos[channel];
+
+	if (chan) {
+		spin_lock_irq(&chan->fence.lock);
+		nouveau_fence_update(chan);
+		spin_unlock_irq(&chan->fence.lock);
+	}
+}
+
+int
+nouveau_fence_init(struct nouveau_channel *chan)
+{
+	INIT_LIST_HEAD(&chan->fence.pending);
+	spin_lock_init(&chan->fence.lock);
+	return 0;
+}
+
+void
+nouveau_fence_fini(struct nouveau_channel *chan)
+{
+	struct list_head *entry, *tmp;
+	struct nouveau_fence *fence;
+
+	list_for_each_safe(entry, tmp, &chan->fence.pending) {
+		fence = list_entry(entry, struct nouveau_fence, entry);
+
+		fence->signalled = true;
+		list_del(&fence->entry);
+		kref_put(&fence->refcount, nouveau_fence_del);
+	}
+}
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
new file mode 100644
index 0000000..11f831f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -0,0 +1,992 @@
+/*
+ * Copyright (C) 2008 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "drm.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+
+#define nouveau_gem_pushbuf_sync(chan) 0
+
+int
+nouveau_gem_object_new(struct drm_gem_object *gem)
+{
+	return 0;
+}
+
+void
+nouveau_gem_object_del(struct drm_gem_object *gem)
+{
+	struct nouveau_bo *nvbo = gem->driver_private;
+	struct ttm_buffer_object *bo;
+
+	if (!nvbo)
+		return;
+	bo = &nvbo->bo;
+	nvbo->gem = NULL;
+
+	if (unlikely(nvbo->cpu_filp))
+		ttm_bo_synccpu_write_release(bo);
+
+	if (unlikely(nvbo->pin_refcnt)) {
+		nvbo->pin_refcnt = 1;
+		nouveau_bo_unpin(nvbo);
+	}
+
+	ttm_bo_unref(&bo);
+}
+
+int
+nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
+		int size, int align, uint32_t flags, uint32_t tile_mode,
+		uint32_t tile_flags, bool no_vm, bool mappable,
+		struct nouveau_bo **pnvbo)
+{
+	struct nouveau_bo *nvbo;
+	int ret;
+
+	ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
+			     tile_flags, no_vm, mappable, pnvbo);
+	if (ret)
+		return ret;
+	nvbo = *pnvbo;
+
+	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
+	if (!nvbo->gem) {
+		nouveau_bo_ref(NULL, pnvbo);
+		return -ENOMEM;
+	}
+
+	nvbo->bo.persistant_swap_storage = nvbo->gem->filp;
+	nvbo->gem->driver_private = nvbo;
+	return 0;
+}
+
+static int
+nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
+{
+	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+
+	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
+	else
+		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
+
+	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
+	rep->offset = nvbo->bo.offset;
+	rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0;
+	rep->tile_mode = nvbo->tile_mode;
+	rep->tile_flags = nvbo->tile_flags;
+	return 0;
+}
+
+static bool
+nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
+{
+	switch (tile_flags) {
+	case 0x0000:
+	case 0x1800:
+	case 0x2800:
+	case 0x4800:
+	case 0x7000:
+	case 0x7400:
+	case 0x7a00:
+	case 0xe000:
+		break;
+	default:
+		NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
+		return false;
+	}
+
+	return true;
+}
+
+int
+nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_nouveau_gem_new *req = data;
+	struct nouveau_bo *nvbo = NULL;
+	struct nouveau_channel *chan = NULL;
+	uint32_t flags = 0;
+	int ret = 0;
+
+	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
+		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
+
+	if (req->channel_hint) {
+		NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
+						     file_priv, chan);
+	}
+
+	if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
+		flags |= TTM_PL_FLAG_VRAM;
+	if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
+		flags |= TTM_PL_FLAG_TT;
+	if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
+		flags |= TTM_PL_FLAG_SYSTEM;
+
+	if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
+		return -EINVAL;
+
+	ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
+			      req->info.tile_mode, req->info.tile_flags, false,
+			      (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
+			      &nvbo);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gem_info(nvbo->gem, &req->info);
+	if (ret)
+		goto out;
+
+	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
+out:
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_handle_unreference(nvbo->gem);
+	mutex_unlock(&dev->struct_mutex);
+
+	if (ret)
+		drm_gem_object_unreference(nvbo->gem);
+	return ret;
+}
+
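+/* Translate the userspace read/write domain masks into a single TTM
+ * placement: writes force the buffer into one domain, while reads
+ * prefer wherever the buffer currently resides to avoid a move.
+ */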
+static int
+nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
+		       uint32_t write_domains, uint32_t valid_domains)
+{
+	struct nouveau_bo *nvbo = gem->driver_private;
+	struct ttm_buffer_object *bo = &nvbo->bo;
+	uint64_t flags;
+
+	if (!valid_domains || (!read_domains && !write_domains))
+		return -EINVAL;
+
+	if (write_domains) {
+		if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
+		    (write_domains & NOUVEAU_GEM_DOMAIN_VRAM))
+			flags = TTM_PL_FLAG_VRAM;
+		else
+		if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
+		    (write_domains & NOUVEAU_GEM_DOMAIN_GART))
+			flags = TTM_PL_FLAG_TT;
+		else
+			return -EINVAL;
+	} else {
+		if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
+		    (read_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
+		    bo->mem.mem_type == TTM_PL_VRAM)
+			flags = TTM_PL_FLAG_VRAM;
+		else
+		if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
+		    (read_domains & NOUVEAU_GEM_DOMAIN_GART) &&
+		    bo->mem.mem_type == TTM_PL_TT)
+			flags = TTM_PL_FLAG_TT;
+		else
+		if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
+		    (read_domains & NOUVEAU_GEM_DOMAIN_VRAM))
+			flags = TTM_PL_FLAG_VRAM;
+		else
+			flags = TTM_PL_FLAG_TT;
+	}
+
+	nouveau_bo_placement_set(nvbo, flags);
+	return 0;
+}
+
+struct validate_op {
+	struct nouveau_fence *fence;
+	struct list_head vram_list;
+	struct list_head gart_list;
+	struct list_head both_list;
+};
+
+static void
+validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
+{
+	struct list_head *entry, *tmp;
+	struct nouveau_bo *nvbo;
+
+	list_for_each_safe(entry, tmp, list) {
+		nvbo = list_entry(entry, struct nouveau_bo, entry);
+		if (likely(fence)) {
+			struct nouveau_fence *prev_fence;
+
+			spin_lock(&nvbo->bo.lock);
+			prev_fence = nvbo->bo.sync_obj;
+			nvbo->bo.sync_obj = nouveau_fence_ref(fence);
+			spin_unlock(&nvbo->bo.lock);
+			nouveau_fence_unref((void *)&prev_fence);
+		}
+
+		list_del(&nvbo->entry);
+		nvbo->reserved_by = NULL;
+		ttm_bo_unreserve(&nvbo->bo);
+		drm_gem_object_unreference(nvbo->gem);
+	}
+}
+
+static void
+validate_fini(struct validate_op *op, bool success)
+{
+	struct nouveau_fence *fence = op->fence;
+
+	if (unlikely(!success))
+		op->fence = NULL;
+
+	validate_fini_list(&op->vram_list, op->fence);
+	validate_fini_list(&op->gart_list, op->fence);
+	validate_fini_list(&op->both_list, op->fence);
+	nouveau_fence_unref((void *)&fence);
+}
+
+static int
+validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
+	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
+	      int nr_buffers, struct validate_op *op)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t sequence;
+	int trycnt = 0;
+	int ret, i;
+
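+	/* Tag every reservation made by this attempt with a global
+	 * sequence number so ttm_bo_reserve() can back off (-EAGAIN)
+	 * from reservation deadlocks between concurrent submitters.
+	 */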
+	sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
+retry:
+	if (++trycnt > 100000) {
+		NV_ERROR(dev, "%s failed and gave up.\n", __func__);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < nr_buffers; i++) {
+		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
+		struct drm_gem_object *gem;
+		struct nouveau_bo *nvbo;
+
+		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
+		if (!gem) {
+			NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
+			validate_fini(op, NULL);
+			return -EINVAL;
+		}
+		nvbo = gem->driver_private;
+
+		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
+			NV_ERROR(dev, "multiple instances of buffer %d on "
+				      "validation list\n", b->handle);
+			validate_fini(op, NULL);
+			return -EINVAL;
+		}
+
+		ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
+		if (ret) {
+			validate_fini(op, NULL);
+			if (ret == -EAGAIN)
+				ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
+			drm_gem_object_unreference(gem);
+			if (ret)
+				return ret;
+			goto retry;
+		}
+
+		nvbo->reserved_by = file_priv;
+		nvbo->pbbo_index = i;
+		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
+		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
+			list_add_tail(&nvbo->entry, &op->both_list);
+		else
+		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
+			list_add_tail(&nvbo->entry, &op->vram_list);
+		else
+		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
+			list_add_tail(&nvbo->entry, &op->gart_list);
+		else {
+			NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
+				 b->valid_domains);
+			validate_fini(op, NULL);
+			return -EINVAL;
+		}
+
+		if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
+			validate_fini(op, NULL);
+
+			if (nvbo->cpu_filp == file_priv) {
+				NV_ERROR(dev, "bo %p mapped by process trying "
+					      "to validate it!\n", nvbo);
+				return -EINVAL;
+			}
+
+			ret = ttm_bo_wait_cpu(&nvbo->bo, false);
+			if (ret == -ERESTART)
+				ret = -EAGAIN;
+			if (ret)
+				return ret;
+			goto retry;
+		}
+	}
+
+	return 0;
+}
+
+static int
+validate_list(struct nouveau_channel *chan, struct list_head *list,
+	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
+{
+	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
+				(void __force __user *)(uintptr_t)user_pbbo_ptr;
+	struct nouveau_bo *nvbo;
+	int ret, relocs = 0;
+
+	list_for_each_entry(nvbo, list, entry) {
+		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
+		struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
+
+		if (prev_fence && nouveau_fence_channel(prev_fence) != chan) {
+			spin_lock(&nvbo->bo.lock);
+			ret = ttm_bo_wait(&nvbo->bo, false, false, false);
+			spin_unlock(&nvbo->bo.lock);
+			if (unlikely(ret))
+				return ret;
+		}
+
+		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
+					     b->write_domains,
+					     b->valid_domains);
+		if (unlikely(ret))
+			return ret;
+
+		nvbo->channel = chan;
+		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
+				      false, false);
+		nvbo->channel = NULL;
+		if (unlikely(ret))
+			return ret;
+
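+		/* If userspace's presumed offset and domain still
+		 * match, its pushbuf needs no patching; otherwise
+		 * record the new placement and count a relocation.
+		 */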
+		if (nvbo->bo.offset == b->presumed_offset &&
+		    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
+		      b->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
+		     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
+		      b->presumed_domain & NOUVEAU_GEM_DOMAIN_GART)))
+			continue;
+
+		if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+			b->presumed_domain = NOUVEAU_GEM_DOMAIN_GART;
+		else
+			b->presumed_domain = NOUVEAU_GEM_DOMAIN_VRAM;
+		b->presumed_offset = nvbo->bo.offset;
+		b->presumed_ok = 0;
+		relocs++;
+
+		if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index], b, sizeof(*b)))
+			return -EFAULT;
+	}
+
+	return relocs;
+}
+
+static int
+nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
+			     struct drm_file *file_priv,
+			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
+			     uint64_t user_buffers, int nr_buffers,
+			     struct validate_op *op, int *apply_relocs)
+{
+	int ret, relocs = 0;
+
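+	/* Buffers are bucketed by the domains they may be placed in,
+	 * then each list is validated and the relocation counts from
+	 * all three are summed.
+	 */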
+	INIT_LIST_HEAD(&op->vram_list);
+	INIT_LIST_HEAD(&op->gart_list);
+	INIT_LIST_HEAD(&op->both_list);
+
+	ret = nouveau_fence_new(chan, &op->fence, false);
+	if (ret)
+		return ret;
+
+	if (nr_buffers == 0)
+		return 0;
+
+	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
+	if (unlikely(ret))
+		return ret;
+
+	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
+	if (unlikely(ret < 0)) {
+		validate_fini(op, NULL);
+		return ret;
+	}
+	relocs += ret;
+
+	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
+	if (unlikely(ret < 0)) {
+		validate_fini(op, NULL);
+		return ret;
+	}
+	relocs += ret;
+
+	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
+	if (unlikely(ret < 0)) {
+		validate_fini(op, NULL);
+		return ret;
+	}
+	relocs += ret;
+
+	*apply_relocs = relocs;
+	return 0;
+}
+
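+/* Copy a userspace array of nmemb elements of the given size into a
+ * freshly allocated kernel buffer (the caller kfrees it).  The
+ * nmemb * size multiplication is not overflow-checked here; callers
+ * bound their counts first.
+ */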
+static inline void *
+u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
+{
+	void *mem;
+	void __user *userptr = (void __force __user *)(uintptr_t)user;
+
+	mem = kmalloc(nmemb * size, GFP_KERNEL);
+	if (!mem)
+		return ERR_PTR(-ENOMEM);
+
+	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
+		kfree(mem);
+		return ERR_PTR(-EFAULT);
+	}
+
+	return mem;
+}
+
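+/* Patch the copied pushbuf in place: each relocation writes the low
+ * or high 32 bits of the buffer's presumed offset plus r->data, and
+ * may OR in a VRAM- or GART-specific value (vor/tor).
+ */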
+static int
+nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
+				struct drm_nouveau_gem_pushbuf_bo *bo,
+				int nr_relocs, uint64_t ptr_relocs,
+				int nr_dwords, int first_dword,
+				uint32_t *pushbuf, bool is_iomem)
+{
+	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
+	struct drm_device *dev = chan->dev;
+	int ret = 0, i;
+
+	reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
+	if (IS_ERR(reloc))
+		return PTR_ERR(reloc);
+
+	for (i = 0; i < nr_relocs; i++) {
+		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
+		struct drm_nouveau_gem_pushbuf_bo *b;
+		uint32_t data;
+
+		if (r->bo_index >= nr_bo || r->reloc_index < first_dword ||
+		    r->reloc_index >= first_dword + nr_dwords) {
+			NV_ERROR(dev, "Bad relocation %d\n", i);
+			NV_ERROR(dev, "  bo: %d max %d\n", r->bo_index, nr_bo);
+			NV_ERROR(dev, "  id: %d max %d\n", r->reloc_index, nr_dwords);
+			ret = -EINVAL;
+			break;
+		}
+
+		b = &bo[r->bo_index];
+		if (b->presumed_ok)
+			continue;
+
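+		/* LOW/HIGH select which 32-bit half of the presumed 64-bit
+		 * offset this dword gets; OR additionally folds in a
+		 * domain-dependent constant (tor for GART, vor for VRAM).
+		 */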
+		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
+			data = b->presumed_offset + r->data;
+		else
+		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
+			data = (b->presumed_offset + r->data) >> 32;
+		else
+			data = r->data;
+
+		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
+			if (b->presumed_domain == NOUVEAU_GEM_DOMAIN_GART)
+				data |= r->tor;
+			else
+				data |= r->vor;
+		}
+
+		if (is_iomem)
+			iowrite32_native(data, (void __force __iomem *)
+						&pushbuf[r->reloc_index]);
+		else
+			pushbuf[r->reloc_index] = data;
+	}
+
+	kfree(reloc);
+	return ret;
+}
+
+int
+nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	struct drm_nouveau_gem_pushbuf *req = data;
+	struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
+	struct nouveau_channel *chan;
+	struct validate_op op;
+	uint32_t *pushbuf = NULL;
+	int ret = 0, do_reloc = 0, i;
+
+	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
+
+	if (req->nr_dwords >= chan->dma.max ||
+	    req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
+	    req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
+		NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
+		NV_ERROR(dev, "  dwords : %d max %d\n", req->nr_dwords,
+			 chan->dma.max - 1);
+		NV_ERROR(dev, "  buffers: %d max %d\n", req->nr_buffers,
+			 NOUVEAU_GEM_MAX_BUFFERS);
+		NV_ERROR(dev, "  relocs : %d max %d\n", req->nr_relocs,
+			 NOUVEAU_GEM_MAX_RELOCS);
+		return -EINVAL;
+	}
+
+	pushbuf = u_memcpya(req->dwords, req->nr_dwords, sizeof(uint32_t));
+	if (IS_ERR(pushbuf))
+		return PTR_ERR(pushbuf);
+
+	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
+	if (IS_ERR(bo)) {
+		kfree(pushbuf);
+		return PTR_ERR(bo);
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
+	/* Validate buffer list */
+	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
+					   req->nr_buffers, &op, &do_reloc);
+	if (ret)
+		goto out;
+
+	/* Apply any relocations that are required */
+	if (do_reloc) {
+		ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers,
+						      bo, req->nr_relocs,
+						      req->relocs,
+						      req->nr_dwords, 0,
+						      pushbuf, false);
+		if (ret)
+			goto out;
+	}
+
+	/* Emit push buffer to the hw */
+	ret = RING_SPACE(chan, req->nr_dwords);
+	if (ret)
+		goto out;
+
+	OUT_RINGp(chan, pushbuf, req->nr_dwords);
+
+	ret = nouveau_fence_emit(op.fence);
+	if (ret) {
+		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
+		WIND_RING(chan);
+		goto out;
+	}
+
+	if (nouveau_gem_pushbuf_sync(chan)) {
+		ret = nouveau_fence_wait(op.fence, NULL, false, false);
+		if (ret) {
+			for (i = 0; i < req->nr_dwords; i++)
+				NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
+			NV_ERROR(dev, "^^ above push buffer failed to execute :(\n");
+		}
+	}
+
+out:
+	validate_fini(&op, ret == 0);
+	mutex_unlock(&dev->struct_mutex);
+	kfree(pushbuf);
+	kfree(bo);
+	return ret;
+}
+
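+/* NV20 and up appear to enter the user push buffer with a "call" (low bits
+ * 0x2) and return via suffix0, while older chips use a jump (0x20000000)
+ * spaced apart with NOPs; see the OUT_RING()s below.
+ */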
+#define PUSHBUF_CAL (dev_priv->card_type >= NV_20)
+
+int
+nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_nouveau_gem_pushbuf_call *req = data;
+	struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
+	struct nouveau_channel *chan;
+	struct drm_gem_object *gem;
+	struct nouveau_bo *pbbo;
+	struct validate_op op;
+	int i, ret = 0, do_reloc = 0;
+
+	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
+
+	if (unlikely(req->handle == 0))
+		goto out_next;
+
+	if (req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
+	    req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
+		NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
+		NV_ERROR(dev, "  buffers: %d max %d\n", req->nr_buffers,
+			 NOUVEAU_GEM_MAX_BUFFERS);
+		NV_ERROR(dev, "  relocs : %d max %d\n", req->nr_relocs,
+			 NOUVEAU_GEM_MAX_RELOCS);
+		return -EINVAL;
+	}
+
+	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
+	if (IS_ERR(bo))
+		return PTR_ERR(bo);
+
+	mutex_lock(&dev->struct_mutex);
+
+	/* Validate buffer list */
+	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
+					   req->nr_buffers, &op, &do_reloc);
+	if (ret) {
+		NV_ERROR(dev, "validate: %d\n", ret);
+		goto out;
+	}
+
+	/* Validate DMA push buffer */
+	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+	if (!gem) {
+		NV_ERROR(dev, "Unknown pb handle 0x%08x\n", req->handle);
+		ret = -EINVAL;
+		goto out;
+	}
+	pbbo = nouveau_gem_object(gem);
+
+	ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
+			     chan->fence.sequence);
+	if (ret) {
+		NV_ERROR(dev, "resv pb: %d\n", ret);
+		drm_gem_object_unreference(gem);
+		goto out;
+	}
+
+	nouveau_bo_placement_set(pbbo, 1 << chan->pushbuf_bo->bo.mem.mem_type);
+	ret = ttm_bo_validate(&pbbo->bo, &pbbo->placement, false, false);
+	if (ret) {
+		NV_ERROR(dev, "validate pb: %d\n", ret);
+		ttm_bo_unreserve(&pbbo->bo);
+		drm_gem_object_unreference(gem);
+		goto out;
+	}
+
+	list_add_tail(&pbbo->entry, &op.both_list);
+
+	/* If the presumed return address doesn't match, we need to map the
+	 * push buffer and fix it up.
+	 */
+	if (!PUSHBUF_CAL) {
+		uint32_t retaddy;
+
+		if (chan->dma.free < 4 + NOUVEAU_DMA_SKIPS) {
+			ret = nouveau_dma_wait(chan, 4 + NOUVEAU_DMA_SKIPS);
+			if (ret) {
+				NV_ERROR(dev, "jmp_space: %d\n", ret);
+				goto out;
+			}
+		}
+
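+		/* compute where execution should jump back into the master
+		 * ring; 0x20000000 marks the dword as a jump command
+		 */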
+		retaddy  = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
+		retaddy |= 0x20000000;
+		if (retaddy != req->suffix0) {
+			req->suffix0 = retaddy;
+			do_reloc = 1;
+		}
+	}
+
+	/* Apply any relocations that are required */
+	if (do_reloc) {
+		void *pbvirt;
+		bool is_iomem;
+		ret = ttm_bo_kmap(&pbbo->bo, 0, pbbo->bo.mem.num_pages,
+				  &pbbo->kmap);
+		if (ret) {
+			NV_ERROR(dev, "kmap pb: %d\n", ret);
+			goto out;
+		}
+
+		pbvirt = ttm_kmap_obj_virtual(&pbbo->kmap, &is_iomem);
+		ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers, bo,
+						      req->nr_relocs,
+						      req->relocs,
+						      req->nr_dwords,
+						      req->offset / 4,
+						      pbvirt, is_iomem);
+
+		if (!PUSHBUF_CAL) {
+			nouveau_bo_wr32(pbbo,
+					req->offset / 4 + req->nr_dwords - 2,
+					req->suffix0);
+		}
+
+		ttm_bo_kunmap(&pbbo->kmap);
+		if (ret) {
+			NV_ERROR(dev, "reloc apply: %d\n", ret);
+			goto out;
+		}
+	}
+
+	if (PUSHBUF_CAL) {
+		ret = RING_SPACE(chan, 2);
+		if (ret) {
+			NV_ERROR(dev, "cal_space: %d\n", ret);
+			goto out;
+		}
+		OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
+				  req->offset) | 2);
+		OUT_RING(chan, 0);
+	} else {
+		ret = RING_SPACE(chan, 2 + NOUVEAU_DMA_SKIPS);
+		if (ret) {
+			NV_ERROR(dev, "jmp_space: %d\n", ret);
+			goto out;
+		}
+		OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
+				  req->offset) | 0x20000000);
+		OUT_RING(chan, 0);
+
+		/* Space the jumps apart with NOPs. */
+		for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+			OUT_RING(chan, 0);
+	}
+
+	ret = nouveau_fence_emit(op.fence);
+	if (ret) {
+		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
+		WIND_RING(chan);
+		goto out;
+	}
+
+out:
+	validate_fini(&op, ret == 0);
+	mutex_unlock(&dev->struct_mutex);
+	kfree(bo);
+
+out_next:
+	if (PUSHBUF_CAL) {
+		req->suffix0 = 0x00020000;
+		req->suffix1 = 0x00000000;
+	} else {
+		req->suffix0 = 0x20000000 |
+			      (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
+		req->suffix1 = 0x00000000;
+	}
+
+	return ret;
+}
+
+int
+nouveau_gem_ioctl_pushbuf_call2(struct drm_device *dev, void *data,
+				struct drm_file *file_priv)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_nouveau_gem_pushbuf_call *req = data;
+
+	req->vram_available = dev_priv->fb_aper_free;
+	req->gart_available = dev_priv->gart_info.aper_free;
+
+	return nouveau_gem_ioctl_pushbuf_call(dev, data, file_priv);
+}
+
+static inline uint32_t
+domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
+{
+	uint32_t flags = 0;
+
+	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
+		flags |= TTM_PL_FLAG_VRAM;
+	if (domain & NOUVEAU_GEM_DOMAIN_GART)
+		flags |= TTM_PL_FLAG_TT;
+
+	return flags;
+}
+
+int
+nouveau_gem_ioctl_pin(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct drm_nouveau_gem_pin *req = data;
+	struct drm_gem_object *gem;
+	struct nouveau_bo *nvbo;
+	int ret = 0;
+
+	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		NV_ERROR(dev, "pin only allowed without kernel modesetting\n");
+		return -EINVAL;
+	}
+
+	if (!DRM_SUSER(DRM_CURPROC))
+		return -EPERM;
+
+	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+	if (!gem)
+		return -EINVAL;
+	nvbo = nouveau_gem_object(gem);
+
+	ret = nouveau_bo_pin(nvbo, domain_to_ttm(nvbo, req->domain));
+	if (ret)
+		goto out;
+
+	req->offset = nvbo->bo.offset;
+	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+		req->domain = NOUVEAU_GEM_DOMAIN_GART;
+	else
+		req->domain = NOUVEAU_GEM_DOMAIN_VRAM;
+
+out:
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_unreference(gem);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+int
+nouveau_gem_ioctl_unpin(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_nouveau_gem_pin *req = data;
+	struct drm_gem_object *gem;
+	int ret;
+
+	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+	if (!gem)
+		return -EINVAL;
+
+	ret = nouveau_bo_unpin(nouveau_gem_object(gem));
+
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_unreference(gem);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+int
+nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	struct drm_nouveau_gem_cpu_prep *req = data;
+	struct drm_gem_object *gem;
+	struct nouveau_bo *nvbo;
+	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
+	int ret = -EINVAL;
+
+	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+	if (!gem)
+		return ret;
+	nvbo = nouveau_gem_object(gem);
+
+	if (nvbo->cpu_filp) {
+		if (nvbo->cpu_filp == file_priv)
+			goto out;
+
+		ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
+		if (ret == -ERESTART)
+			ret = -EAGAIN;
+		if (ret)
+			goto out;
+	}
+
+	if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
+		ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
+	} else {
+		ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
+		if (ret == -ERESTART)
+			ret = -EAGAIN;
+		else
+		if (ret == 0)
+			nvbo->cpu_filp = file_priv;
+	}
+
+out:
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_unreference(gem);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+int
+nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	struct drm_nouveau_gem_cpu_prep *req = data;
+	struct drm_gem_object *gem;
+	struct nouveau_bo *nvbo;
+	int ret = -EINVAL;
+
+	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+	if (!gem)
+		return ret;
+	nvbo = nouveau_gem_object(gem);
+
+	if (nvbo->cpu_filp != file_priv)
+		goto out;
+	nvbo->cpu_filp = NULL;
+
+	ttm_bo_synccpu_write_release(&nvbo->bo);
+	ret = 0;
+
+out:
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_unreference(gem);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+int
+nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct drm_nouveau_gem_info *req = data;
+	struct drm_gem_object *gem;
+	int ret;
+
+	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+	if (!gem)
+		return -EINVAL;
+
+	ret = nouveau_gem_info(gem, req);
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_unreference(gem);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
new file mode 100644
index 0000000..dc46792
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -0,0 +1,1080 @@
+/*
+ * Copyright 2006 Dave Airlie
+ * Copyright 2007 Maarten Maathuis
+ * Copyright 2007-2009 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+
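+/* PCI device-id ranges used to detect nForce/nForce2 IGPs, whose memory
+ * clock is read from chipset PCI config space (see nouveau_hw_get_clock())
+ */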
+#define CHIPSET_NFORCE 0x01a0
+#define CHIPSET_NFORCE2 0x01f0
+
+/*
+ * misc hw access wrappers/control functions
+ */
+
+void
+NVWriteVgaSeq(struct drm_device *dev, int head, uint8_t index, uint8_t value)
+{
+	NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index);
+	NVWritePRMVIO(dev, head, NV_PRMVIO_SR, value);
+}
+
+uint8_t
+NVReadVgaSeq(struct drm_device *dev, int head, uint8_t index)
+{
+	NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index);
+	return NVReadPRMVIO(dev, head, NV_PRMVIO_SR);
+}
+
+void
+NVWriteVgaGr(struct drm_device *dev, int head, uint8_t index, uint8_t value)
+{
+	NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index);
+	NVWritePRMVIO(dev, head, NV_PRMVIO_GX, value);
+}
+
+uint8_t
+NVReadVgaGr(struct drm_device *dev, int head, uint8_t index)
+{
+	NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index);
+	return NVReadPRMVIO(dev, head, NV_PRMVIO_GX);
+}
+
+/* CR44 takes values 0 (head A), 3 (head B) and 4 (heads tied)
+ * it affects only the 8 bit vga io regs, which we access using mmio at
+ * 0xc{0,2}3c*, 0x60{1,3}3*, and 0x68{1,3}3d*
+ * in general, the set value of cr44 does not matter: reg access works as
+ * expected and values can be set for the appropriate head by using a 0x2000
+ * offset as required
+ * however:
+ * a) pre nv40, the head B range of PRMVIO regs at 0xc23c* was not exposed and
+ *    cr44 must be set to 0 or 3 for accessing values on the correct head
+ *    through the common 0xc03c* addresses
+ * b) in tied mode (4) head B is programmed to the values set on head A, and
+ *    access using the head B addresses can have strange results, so we leave
+ *    tied mode during init once we know what value cr44 should be restored
+ *    to on exit
+ *
+ * the owner parameter is slightly abused:
+ * 0 and 1 are treated as head values and so the set value is (owner * 3)
+ * other values are treated as literal values to set
+ */
+void
+NVSetOwner(struct drm_device *dev, int owner)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (owner == 1)
+		owner *= 3;
+
+	if (dev_priv->chipset == 0x11) {
+		/* This might seem stupid, but the blob does it and
+		 * omitting it often locks the system up.
+		 */
+		NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX);
+		NVReadVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX);
+	}
+
+	/* CR44 is always changed on CRTC0 */
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner);
+
+	if (dev_priv->chipset == 0x11) {	/* set me harder */
+		NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
+		NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
+	}
+}
+
+void
+NVBlankScreen(struct drm_device *dev, int head, bool blank)
+{
+	unsigned char seq1;
+
+	if (nv_two_heads(dev))
+		NVSetOwner(dev, head);
+
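+	/* bit 5 of the VGA clocking mode register (SR01) is "screen off" */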
+	seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX);
+
+	NVVgaSeqReset(dev, head, true);
+	if (blank)
+		NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20);
+	else
+		NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20);
+	NVVgaSeqReset(dev, head, false);
+}
+
+/*
+ * PLL setting
+ */
+
+static int
+powerctrl_1_shift(int chip_version, int reg)
+{
+	int shift = -4;
+
+	if (chip_version < 0x17 || chip_version == 0x1a || chip_version == 0x20)
+		return shift;
+
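+	/* deliberate fall-through: each matched case adds a further 4, so
+	 * VPLL2 ends up with shift 12, VPLL 8, MPLL 4 and NVPLL 0
+	 */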
+	switch (reg) {
+	case NV_RAMDAC_VPLL2:
+		shift += 4;
+	case NV_PRAMDAC_VPLL_COEFF:
+		shift += 4;
+	case NV_PRAMDAC_MPLL_COEFF:
+		shift += 4;
+	case NV_PRAMDAC_NVPLL_COEFF:
+		shift += 4;
+	}
+
+	/*
+	 * the shift for vpll regs is only used for nv3x chips with a single
+	 * stage pll
+	 */
+	if (shift > 4 && (chip_version < 0x32 || chip_version == 0x35 ||
+			  chip_version == 0x36 || chip_version >= 0x40))
+		shift = -4;
+
+	return shift;
+}
+
+static void
+setPLL_single(struct drm_device *dev, uint32_t reg, struct nouveau_pll_vals *pv)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int chip_version = dev_priv->vbios->chip_version;
+	uint32_t oldpll = NVReadRAMDAC(dev, 0, reg);
+	int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
+	uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
+	uint32_t saved_powerctrl_1 = 0;
+	int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg);
+
+	if (oldpll == pll)
+		return;	/* already set */
+
+	if (shift_powerctrl_1 >= 0) {
+		saved_powerctrl_1 = nvReadMC(dev, NV_PBUS_POWERCTRL_1);
+		nvWriteMC(dev, NV_PBUS_POWERCTRL_1,
+			(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
+			1 << shift_powerctrl_1);
+	}
+
+	if (oldM && pv->M1 && (oldN / oldM < pv->N1 / pv->M1))
+		/* upclock -- write new post divider first */
+		NVWriteRAMDAC(dev, 0, reg, pv->log2P << 16 | (oldpll & 0xffff));
+	else
+		/* downclock -- write new NM first */
+		NVWriteRAMDAC(dev, 0, reg, (oldpll & 0xffff0000) | pv->NM1);
+
+	if (chip_version < 0x17 && chip_version != 0x11)
+		/* wait a bit on older chips */
+		msleep(64);
+	NVReadRAMDAC(dev, 0, reg);
+
+	/* then write the other half as well */
+	NVWriteRAMDAC(dev, 0, reg, pll);
+
+	if (shift_powerctrl_1 >= 0)
+		nvWriteMC(dev, NV_PBUS_POWERCTRL_1, saved_powerctrl_1);
+}
+
+static uint32_t
+new_ramdac580(uint32_t reg1, bool ss, uint32_t ramdac580)
+{
+	bool head_a = (reg1 == NV_PRAMDAC_VPLL_COEFF);
+
+	if (ss)	/* single stage pll mode */
+		ramdac580 |= head_a ? NV_RAMDAC_580_VPLL1_ACTIVE :
+				      NV_RAMDAC_580_VPLL2_ACTIVE;
+	else
+		ramdac580 &= head_a ? ~NV_RAMDAC_580_VPLL1_ACTIVE :
+				      ~NV_RAMDAC_580_VPLL2_ACTIVE;
+
+	return ramdac580;
+}
+
+static void
+setPLL_double_highregs(struct drm_device *dev, uint32_t reg1,
+		       struct nouveau_pll_vals *pv)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int chip_version = dev_priv->vbios->chip_version;
+	bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
+	uint32_t reg2 = reg1 + ((reg1 == NV_RAMDAC_VPLL2) ? 0x5c : 0x70);
+	uint32_t oldpll1 = NVReadRAMDAC(dev, 0, reg1);
+	uint32_t oldpll2 = !nv3035 ? NVReadRAMDAC(dev, 0, reg2) : 0;
+	uint32_t pll1 = (oldpll1 & 0xfff80000) | pv->log2P << 16 | pv->NM1;
+	uint32_t pll2 = (oldpll2 & 0x7fff0000) | 1 << 31 | pv->NM2;
+	uint32_t oldramdac580 = 0, ramdac580 = 0;
+	bool single_stage = !pv->NM2 || pv->N2 == pv->M2;	/* nv41+ only */
+	uint32_t saved_powerctrl_1 = 0, savedc040 = 0;
+	int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg1);
+
+	/* model specific additions to generic pll1 and pll2 set up above */
+	if (nv3035) {
+		pll1 = (pll1 & 0xfcc7ffff) | (pv->N2 & 0x18) << 21 |
+		       (pv->N2 & 0x7) << 19 | 8 << 4 | (pv->M2 & 7) << 4;
+		pll2 = 0;
+	}
+	if (chip_version > 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) { /* !nv40 */
+		oldramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
+		ramdac580 = new_ramdac580(reg1, single_stage, oldramdac580);
+		if (oldramdac580 != ramdac580)
+			oldpll1 = ~0;	/* force mismatch */
+		if (single_stage)
+			/* magic value used by nvidia in single stage mode */
+			pll2 |= 0x011f;
+	}
+	if (chip_version > 0x70)
+		/* magic bits set by the blob (but not the bios) on g71-73 */
+		pll1 = (pll1 & 0x7fffffff) | (single_stage ? 0x4 : 0xc) << 28;
+
+	if (oldpll1 == pll1 && oldpll2 == pll2)
+		return;	/* already set */
+
+	if (shift_powerctrl_1 >= 0) {
+		saved_powerctrl_1 = nvReadMC(dev, NV_PBUS_POWERCTRL_1);
+		nvWriteMC(dev, NV_PBUS_POWERCTRL_1,
+			(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
+			1 << shift_powerctrl_1);
+	}
+
+	if (chip_version >= 0x40) {
+		int shift_c040 = 14;
+
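+		/* deliberate fall-through: MPLL lands on shift 22, NVPLL on
+		 * 20, VPLL2 on 18 and VPLL on 16; other regs keep the
+		 * default of 14
+		 */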
+		switch (reg1) {
+		case NV_PRAMDAC_MPLL_COEFF:
+			shift_c040 += 2;
+		case NV_PRAMDAC_NVPLL_COEFF:
+			shift_c040 += 2;
+		case NV_RAMDAC_VPLL2:
+			shift_c040 += 2;
+		case NV_PRAMDAC_VPLL_COEFF:
+			shift_c040 += 2;
+		}
+
+		savedc040 = nvReadMC(dev, 0xc040);
+		if (shift_c040 != 14)
+			nvWriteMC(dev, 0xc040, savedc040 & ~(3 << shift_c040));
+	}
+
+	if (oldramdac580 != ramdac580)
+		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_580, ramdac580);
+
+	if (!nv3035)
+		NVWriteRAMDAC(dev, 0, reg2, pll2);
+	NVWriteRAMDAC(dev, 0, reg1, pll1);
+
+	if (shift_powerctrl_1 >= 0)
+		nvWriteMC(dev, NV_PBUS_POWERCTRL_1, saved_powerctrl_1);
+	if (chip_version >= 0x40)
+		nvWriteMC(dev, 0xc040, savedc040);
+}
+
+static void
+setPLL_double_lowregs(struct drm_device *dev, uint32_t NMNMreg,
+		      struct nouveau_pll_vals *pv)
+{
+	/* When setting PLLs, there is a merry game of disabling and enabling
+	 * various bits of hardware during the process. This function is a
+	 * synthesis of six nv4x traces, with nearly every card doing a subtly
+	 * different thing. With luck all the necessary bits for each card are
+	 * combined herein. Without luck it deviates from each card's formula
+	 * so as to not work on any :)
+	 */
+
+	uint32_t Preg = NMNMreg - 4;
+	bool mpll = Preg == 0x4020;
+	uint32_t oldPval = nvReadMC(dev, Preg);
+	uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
+	uint32_t Pval = (oldPval & (mpll ? ~(0x11 << 16) : ~(1 << 16))) |
+			0xc << 28 | pv->log2P << 16;
+	uint32_t saved4600 = 0;
+	/* some cards have different maskc040s */
+	uint32_t maskc040 = ~(3 << 14), savedc040;
+	bool single_stage = !pv->NM2 || pv->N2 == pv->M2;
+
+	if (nvReadMC(dev, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
+		return;
+
+	if (Preg == 0x4000)
+		maskc040 = ~0x333;
+	if (Preg == 0x4058)
+		maskc040 = ~(0xc << 24);
+
+	if (mpll) {
+		struct pll_lims pll_lim;
+		uint8_t Pval2;
+
+		if (get_pll_limits(dev, Preg, &pll_lim))
+			return;
+
+		Pval2 = pv->log2P + pll_lim.log2p_bias;
+		if (Pval2 > pll_lim.max_log2p)
+			Pval2 = pll_lim.max_log2p;
+		Pval |= 1 << 28 | Pval2 << 20;
+
+		saved4600 = nvReadMC(dev, 0x4600);
+		nvWriteMC(dev, 0x4600, saved4600 | 8 << 28);
+	}
+	if (single_stage)
+		Pval |= mpll ? 1 << 12 : 1 << 8;
+
+	nvWriteMC(dev, Preg, oldPval | 1 << 28);
+	nvWriteMC(dev, Preg, Pval & ~(4 << 28));
+	if (mpll) {
+		Pval |= 8 << 20;
+		nvWriteMC(dev, 0x4020, Pval & ~(0xc << 28));
+		nvWriteMC(dev, 0x4038, Pval & ~(0xc << 28));
+	}
+
+	savedc040 = nvReadMC(dev, 0xc040);
+	nvWriteMC(dev, 0xc040, savedc040 & maskc040);
+
+	nvWriteMC(dev, NMNMreg, NMNM);
+	if (NMNMreg == 0x4024)
+		nvWriteMC(dev, 0x403c, NMNM);
+
+	nvWriteMC(dev, Preg, Pval);
+	if (mpll) {
+		Pval &= ~(8 << 20);
+		nvWriteMC(dev, 0x4020, Pval);
+		nvWriteMC(dev, 0x4038, Pval);
+		nvWriteMC(dev, 0x4600, saved4600);
+	}
+
+	nvWriteMC(dev, 0xc040, savedc040);
+
+	if (mpll) {
+		nvWriteMC(dev, 0x4020, Pval & ~(1 << 28));
+		nvWriteMC(dev, 0x4038, Pval & ~(1 << 28));
+	}
+}
+
+void
+nouveau_hw_setpll(struct drm_device *dev, uint32_t reg1,
+		  struct nouveau_pll_vals *pv)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int cv = dev_priv->vbios->chip_version;
+
+	if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
+	    cv >= 0x40) {
+		if (reg1 > 0x405c)
+			setPLL_double_highregs(dev, reg1, pv);
+		else
+			setPLL_double_lowregs(dev, reg1, pv);
+	} else
+		setPLL_single(dev, reg1, pv);
+}
+
+/*
+ * PLL getting
+ */
+
+static void
+nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
+		      uint32_t pll2, struct nouveau_pll_vals *pllvals)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	/* to force parsing as single stage (i.e. nv40 vplls) pass pll2 as 0 */
+
+	/* log2P is & 0x7 as it's never more than 7; nv30/35 only use 3 bits */
+	pllvals->log2P = (pll1 >> 16) & 0x7;
+	pllvals->N2 = pllvals->M2 = 1;
+
+	if (reg1 <= 0x405c) {
+		pllvals->NM1 = pll2 & 0xffff;
+		/* single stage NVPLL and VPLLs use 1 << 8, MPLL uses 1 << 12 */
+		if (!(pll1 & 0x1100))
+			pllvals->NM2 = pll2 >> 16;
+	} else {
+		pllvals->NM1 = pll1 & 0xffff;
+		if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2)
+			pllvals->NM2 = pll2 & 0xffff;
+		else if (dev_priv->chipset == 0x30 || dev_priv->chipset == 0x35) {
+			pllvals->M1 &= 0xf; /* only 4 bits */
+			if (pll1 & NV30_RAMDAC_ENABLE_VCO2) {
+				pllvals->M2 = (pll1 >> 4) & 0x7;
+				pllvals->N2 = ((pll1 >> 21) & 0x18) |
+					      ((pll1 >> 19) & 0x7);
+			}
+		}
+	}
+}
+
+int
+nouveau_hw_get_pllvals(struct drm_device *dev, enum pll_types plltype,
+		       struct nouveau_pll_vals *pllvals)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	const uint32_t nv04_regs[MAX_PLL_TYPES] = { NV_PRAMDAC_NVPLL_COEFF,
+						    NV_PRAMDAC_MPLL_COEFF,
+						    NV_PRAMDAC_VPLL_COEFF,
+						    NV_RAMDAC_VPLL2 };
+	const uint32_t nv40_regs[MAX_PLL_TYPES] = { 0x4000,
+						    0x4020,
+						    NV_PRAMDAC_VPLL_COEFF,
+						    NV_RAMDAC_VPLL2 };
+	uint32_t reg1, pll1, pll2 = 0;
+	struct pll_lims pll_lim;
+	int ret;
+
+	if (dev_priv->card_type < NV_40)
+		reg1 = nv04_regs[plltype];
+	else
+		reg1 = nv40_regs[plltype];
+
+	pll1 = nvReadMC(dev, reg1);
+
+	if (reg1 <= 0x405c)
+		pll2 = nvReadMC(dev, reg1 + 4);
+	else if (nv_two_reg_pll(dev)) {
+		uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 0x5c : 0x70);
+
+		pll2 = nvReadMC(dev, reg2);
+	}
+
+	if (dev_priv->card_type == NV_40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) {
+		uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
+
+		/* check whether vpll has been forced into single stage mode */
+		if (reg1 == NV_PRAMDAC_VPLL_COEFF) {
+			if (ramdac580 & NV_RAMDAC_580_VPLL1_ACTIVE)
+				pll2 = 0;
+		} else
+			if (ramdac580 & NV_RAMDAC_580_VPLL2_ACTIVE)
+				pll2 = 0;
+	}
+
+	nouveau_hw_decode_pll(dev, reg1, pll1, pll2, pllvals);
+
+	ret = get_pll_limits(dev, plltype, &pll_lim);
+	if (ret)
+		return ret;
+
+	pllvals->refclk = pll_lim.refclk;
+
+	return 0;
+}
+
+int
+nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pv)
+{
+	/* Avoid divide by zero if called at an inappropriate time */
+	if (!pv->M1 || !pv->M2)
+		return 0;
+
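+	/* clk = N1*N2*refclk / (M1*M2*2^log2P), in the same units as refclk
+	 * (kHz as used here); e.g. refclk 27000, N1 100, M1 10, N2/M2 1 and
+	 * log2P 1 gives 135000
+	 */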
+	return pv->N1 * pv->N2 * pv->refclk / (pv->M1 * pv->M2) >> pv->log2P;
+}
+
+int
+nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype)
+{
+	struct nouveau_pll_vals pllvals;
+
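+	/* nForce/nForce2 IGPs use system memory, so the memory clock comes
+	 * from the chipset's PCI config space rather than a PLL register
+	 */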
+	if (plltype == MPLL && (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
+		uint32_t mpllP;
+
+		pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
+		if (!mpllP)
+			mpllP = 4;
+
+		return 400000 / mpllP;
+	} else
+	if (plltype == MPLL && (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
+		uint32_t clock;
+
+		pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
+		return clock;
+	}
+
+	nouveau_hw_get_pllvals(dev, plltype, &pllvals);
+
+	return nouveau_hw_pllvals_to_clk(&pllvals);
+}
+
+static void
+nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
+{
+	/* the vpll on an unused head can come up with a random value, way
+	 * beyond the pll limits.  for some reason this causes the chip to
+	 * lock up when reading the dac palette regs, so set a valid pll here
+	 * when such a condition is detected.  only seen on nv11 to date
+	 */
+
+	struct pll_lims pll_lim;
+	struct nouveau_pll_vals pv;
+	uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
+
+	if (get_pll_limits(dev, head ? VPLL2 : VPLL1, &pll_lim))
+		return;
+	nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &pv);
+
+	if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m &&
+	    pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n &&
+	    pv.log2P <= pll_lim.max_log2p)
+		return;
+
+	NV_WARN(dev, "VPLL %d outwith limits, attempting to fix\n", head + 1);
+
+	/* set lowest clock within static limits */
+	pv.M1 = pll_lim.vco1.max_m;
+	pv.N1 = pll_lim.vco1.min_n;
+	pv.log2P = pll_lim.max_usable_log2p;
+	nouveau_hw_setpll(dev, pllreg, &pv);
+}
+
+/*
+ * vga font save/restore
+ */
+
+static void nouveau_vga_font_io(struct drm_device *dev,
+				void __iomem *iovram,
+				bool save, unsigned plane)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned i;
+
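+	/* select one plane at a time: write-enable it via SR02 and point
+	 * GR04 reads at it; all four planes alias the same 64KiB window
+	 */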
+	NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, 1 << plane);
+	NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, plane);
+	for (i = 0; i < 16384; i++) {
+		if (save) {
+			dev_priv->saved_vga_font[plane][i] =
+					ioread32_native(iovram + i * 4);
+		} else {
+			iowrite32_native(dev_priv->saved_vga_font[plane][i],
+							iovram + i * 4);
+		}
+	}
+}
+
+void
+nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save)
+{
+	uint8_t misc, gr4, gr5, gr6, seq2, seq4;
+	bool graphicsmode;
+	unsigned plane;
+	void __iomem *iovram;
+
+	if (nv_two_heads(dev))
+		NVSetOwner(dev, 0);
+
+	NVSetEnablePalette(dev, 0, true);
+	graphicsmode = NVReadVgaAttr(dev, 0, NV_CIO_AR_MODE_INDEX) & 1;
+	NVSetEnablePalette(dev, 0, false);
+
+	if (graphicsmode) /* graphics mode => framebuffer => no need to save */
+		return;
+
+	NV_INFO(dev, "%sing VGA fonts\n", save ? "Sav" : "Restor");
+
+	/* map first 64KiB of VRAM, holds VGA fonts etc */
+	iovram = ioremap(pci_resource_start(dev->pdev, 1), 65536);
+	if (!iovram) {
+		NV_ERROR(dev, "Failed to map VRAM, "
+					"cannot save/restore VGA fonts.\n");
+		return;
+	}
+
+	if (nv_two_heads(dev))
+		NVBlankScreen(dev, 1, true);
+	NVBlankScreen(dev, 0, true);
+
+	/* save control regs */
+	misc = NVReadPRMVIO(dev, 0, NV_PRMVIO_MISC__READ);
+	seq2 = NVReadVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX);
+	seq4 = NVReadVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX);
+	gr4 = NVReadVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX);
+	gr5 = NVReadVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX);
+	gr6 = NVReadVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX);
+
+	NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, 0x67);
+	NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, 0x6);
+	NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, 0x0);
+	NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, 0x5);
+
+	/* save/restore the font in planes 0..3 */
+	for (plane = 0; plane < 4; plane++)
+		nouveau_vga_font_io(dev, iovram, save, plane);
+
+	/* restore control regs */
+	NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, misc);
+	NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, gr4);
+	NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, gr5);
+	NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, gr6);
+	NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, seq2);
+	NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, seq4);
+
+	if (nv_two_heads(dev))
+		NVBlankScreen(dev, 1, false);
+	NVBlankScreen(dev, 0, false);
+
+	iounmap(iovram);
+}
+
+/*
+ * mode state save/load
+ */
+
+static void
+rd_cio_state(struct drm_device *dev, int head,
+	     struct nv04_crtc_reg *crtcstate, int index)
+{
+	crtcstate->CRTC[index] = NVReadVgaCrtc(dev, head, index);
+}
+
+static void
+wr_cio_state(struct drm_device *dev, int head,
+	     struct nv04_crtc_reg *crtcstate, int index)
+{
+	NVWriteVgaCrtc(dev, head, index, crtcstate->CRTC[index]);
+}
+
+static void
+nv_save_state_ramdac(struct drm_device *dev, int head,
+		     struct nv04_mode_state *state)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
+	int i;
+
+	if (dev_priv->card_type >= NV_10)
+		regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);
+
+	nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &regp->pllvals);
+	state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
+	if (nv_two_heads(dev))
+		state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
+	if (dev_priv->chipset == 0x11)
+		regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11);
+
+	regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL);
+
+	if (nv_gf4_disp_arch(dev))
+		regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630);
+	if (dev_priv->chipset >= 0x30)
+		regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634);
+
+	regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP);
+	regp->tv_vtotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL);
+	regp->tv_vskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW);
+	regp->tv_vsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY);
+	regp->tv_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL);
+	regp->tv_hskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW);
+	regp->tv_hsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY);
+	regp->tv_hsync_delay2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2);
+
+	for (i = 0; i < 7; i++) {
+		uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4);
+		regp->fp_vert_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg);
+		regp->fp_horiz_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg + 0x20);
+	}
+
+	if (nv_gf4_disp_arch(dev)) {
+		regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_FP_DITHER);
+		for (i = 0; i < 3; i++) {
+			regp->dither_regs[i] = NVReadRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4);
+			regp->dither_regs[i + 3] = NVReadRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4);
+		}
+	}
+
+	regp->fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
+	regp->fp_debug_0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0);
+	if (!nv_gf4_disp_arch(dev) && head == 0) {
+		/* early chips don't allow access to PRAMDAC_TMDS_* without
+		 * the head A FPCLK on (nv11 even locks up) */
+		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0 &
+			      ~NV_PRAMDAC_FP_DEBUG_0_PWRDOWN_FPCLK);
+	}
+	regp->fp_debug_1 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1);
+	regp->fp_debug_2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2);
+
+	regp->fp_margin_color = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR);
+
+	if (nv_gf4_disp_arch(dev))
+		regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0);
+
+	if (dev_priv->card_type == NV_40) {
+		regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20);
+		regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24);
+		regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34);
+
+		for (i = 0; i < 38; i++)
+			regp->ctv_regs[i] = NVReadRAMDAC(dev, head,
+							 NV_PRAMDAC_CTV + 4*i);
+	}
+}
+
+static void
+nv_load_state_ramdac(struct drm_device *dev, int head,
+		     struct nv04_mode_state *state)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
+	uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
+	int i;
+
+	if (dev_priv->card_type >= NV_10)
+		NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync);
+
+	nouveau_hw_setpll(dev, pllreg, &regp->pllvals);
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
+	if (nv_two_heads(dev))
+		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk);
+	if (dev_priv->chipset == 0x11)
+		NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither);
+
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl);
+
+	if (nv_gf4_disp_arch(dev))
+		NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630);
+	if (dev_priv->chipset >= 0x30)
+		NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634);
+
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL, regp->tv_vtotal);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW, regp->tv_vskew);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY, regp->tv_vsync_delay);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL, regp->tv_htotal);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW, regp->tv_hskew);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY, regp->tv_hsync_delay);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2, regp->tv_hsync_delay2);
+
+	for (i = 0; i < 7; i++) {
+		uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4);
+
+		NVWriteRAMDAC(dev, head, ramdac_reg, regp->fp_vert_regs[i]);
+		NVWriteRAMDAC(dev, head, ramdac_reg + 0x20, regp->fp_horiz_regs[i]);
+	}
+
+	if (nv_gf4_disp_arch(dev)) {
+		NVWriteRAMDAC(dev, head, NV_RAMDAC_FP_DITHER, regp->dither);
+		for (i = 0; i < 3; i++) {
+			NVWriteRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4, regp->dither_regs[i]);
+			NVWriteRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4, regp->dither_regs[i + 3]);
+		}
+	}
+
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, regp->fp_control);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regp->fp_debug_1);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2, regp->fp_debug_2);
+
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR, regp->fp_margin_color);
+
+	if (nv_gf4_disp_arch(dev))
+		NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0);
+
+	if (dev_priv->card_type == NV_40) {
+		NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20);
+		NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24);
+		NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34);
+
+		for (i = 0; i < 38; i++)
+			NVWriteRAMDAC(dev, head,
+				      NV_PRAMDAC_CTV + 4*i, regp->ctv_regs[i]);
+	}
+}
+
+static void
+nv_save_state_vga(struct drm_device *dev, int head,
+		  struct nv04_mode_state *state)
+{
+	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
+	int i;
+
+	regp->MiscOutReg = NVReadPRMVIO(dev, head, NV_PRMVIO_MISC__READ);
+
+	for (i = 0; i < 25; i++)
+		rd_cio_state(dev, head, regp, i);
+
+	NVSetEnablePalette(dev, head, true);
+	for (i = 0; i < 21; i++)
+		regp->Attribute[i] = NVReadVgaAttr(dev, head, i);
+	NVSetEnablePalette(dev, head, false);
+
+	for (i = 0; i < 9; i++)
+		regp->Graphics[i] = NVReadVgaGr(dev, head, i);
+
+	for (i = 0; i < 5; i++)
+		regp->Sequencer[i] = NVReadVgaSeq(dev, head, i);
+}
+
+static void
+nv_load_state_vga(struct drm_device *dev, int head,
+		  struct nv04_mode_state *state)
+{
+	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
+	int i;
+
+	NVWritePRMVIO(dev, head, NV_PRMVIO_MISC__WRITE, regp->MiscOutReg);
+
+	for (i = 0; i < 5; i++)
+		NVWriteVgaSeq(dev, head, i, regp->Sequencer[i]);
+
+	nv_lock_vga_crtc_base(dev, head, false);
+	for (i = 0; i < 25; i++)
+		wr_cio_state(dev, head, regp, i);
+	nv_lock_vga_crtc_base(dev, head, true);
+
+	for (i = 0; i < 9; i++)
+		NVWriteVgaGr(dev, head, i, regp->Graphics[i]);
+
+	NVSetEnablePalette(dev, head, true);
+	for (i = 0; i < 21; i++)
+		NVWriteVgaAttr(dev, head, i, regp->Attribute[i]);
+	NVSetEnablePalette(dev, head, false);
+}
+
+static void
+nv_save_state_ext(struct drm_device *dev, int head,
+		  struct nv04_mode_state *state)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
+	int i;
+
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);
+
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
+	if (dev_priv->card_type >= NV_30)
+		rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
+
+	if (dev_priv->card_type >= NV_10) {
+		regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830);
+		regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834);
+
+		if (dev_priv->card_type >= NV_30)
+			regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT);
+
+		if (dev_priv->card_type == NV_40)
+			regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850);
+
+		if (nv_two_heads(dev))
+			regp->crtc_eng_ctrl = NVReadCRTC(dev, head, NV_PCRTC_ENGINE_CTRL);
+		regp->cursor_cfg = NVReadCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG);
+	}
+
+	regp->crtc_cfg = NVReadCRTC(dev, head, NV_PCRTC_CONFIG);
+
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
+	if (dev_priv->card_type >= NV_10) {
+		rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
+		rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
+		rd_cio_state(dev, head, regp, NV_CIO_CRE_4B);
+		rd_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY);
+	}
+	/* NV11 and NV20 don't have these regs; they stop at 0x52. */
+	if (nv_gf4_disp_arch(dev)) {
+		rd_cio_state(dev, head, regp, NV_CIO_CRE_53);
+		rd_cio_state(dev, head, regp, NV_CIO_CRE_54);
+
+		for (i = 0; i < 0x10; i++)
+			regp->CR58[i] = NVReadVgaCrtc5758(dev, head, i);
+		rd_cio_state(dev, head, regp, NV_CIO_CRE_59);
+		rd_cio_state(dev, head, regp, NV_CIO_CRE_5B);
+
+		rd_cio_state(dev, head, regp, NV_CIO_CRE_85);
+		rd_cio_state(dev, head, regp, NV_CIO_CRE_86);
+	}
+
+	regp->fb_start = NVReadCRTC(dev, head, NV_PCRTC_START);
+}
+
+static void
+nv_load_state_ext(struct drm_device *dev, int head,
+		  struct nv04_mode_state *state)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
+	uint32_t reg900;
+	int i;
+
+	if (dev_priv->card_type >= NV_10) {
+		if (nv_two_heads(dev))
+			/* setting ENGINE_CTRL (EC) *must* come before
+			 * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in
+			 * EC that should not be overwritten by writing stale EC
+			 */
+			NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl);
+
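+		/* stop the video overlay and clamp its buffers to the size
+		 * of the framebuffer before reprogramming CRTC state
+		 */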
+		nvWriteVIDEO(dev, NV_PVIDEO_STOP, 1);
+		nvWriteVIDEO(dev, NV_PVIDEO_INTR_EN, 0);
+		nvWriteVIDEO(dev, NV_PVIDEO_OFFSET_BUFF(0), 0);
+		nvWriteVIDEO(dev, NV_PVIDEO_OFFSET_BUFF(1), 0);
+		nvWriteVIDEO(dev, NV_PVIDEO_LIMIT(0), dev_priv->fb_available_size - 1);
+		nvWriteVIDEO(dev, NV_PVIDEO_LIMIT(1), dev_priv->fb_available_size - 1);
+		nvWriteVIDEO(dev, NV_PVIDEO_UVPLANE_LIMIT(0), dev_priv->fb_available_size - 1);
+		nvWriteVIDEO(dev, NV_PVIDEO_UVPLANE_LIMIT(1), dev_priv->fb_available_size - 1);
+		nvWriteMC(dev, NV_PBUS_POWERCTRL_2, 0);
+
+		NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
+		NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830);
+		NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834);
+
+		if (dev_priv->card_type >= NV_30)
+			NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext);
+
+		if (dev_priv->card_type == NV_40) {
+			NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);
+
+			reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
+			if (regp->crtc_cfg == NV_PCRTC_CONFIG_START_ADDRESS_HSYNC)
+				NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000);
+			else
+				NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000);
+		}
+	}
+
+	NVWriteCRTC(dev, head, NV_PCRTC_CONFIG, regp->crtc_cfg);
+
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX);
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX);
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX);
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX);
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX);
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX);
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
+	if (dev_priv->card_type >= NV_30)
+		wr_cio_state(dev, head, regp, NV_CIO_CRE_47);
+
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
+	if (dev_priv->card_type == NV_40)
+		nv_fix_nv40_hw_cursor(dev, head);
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
+
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
+	if (dev_priv->card_type >= NV_10) {
+		wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
+		wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
+		wr_cio_state(dev, head, regp, NV_CIO_CRE_4B);
+		wr_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY);
+	}
+	/* NV11 and NV20 stop at 0x52. */
+	if (nv_gf4_disp_arch(dev)) {
+		if (dev_priv->card_type == NV_10) {
+			/* Not waiting for vertical retrace before modifying
+			   CRE_53/CRE_54 causes lockups. */
+			nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
+			nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
+		}
+
+		wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
+		wr_cio_state(dev, head, regp, NV_CIO_CRE_54);
+
+		for (i = 0; i < 0x10; i++)
+			NVWriteVgaCrtc5758(dev, head, i, regp->CR58[i]);
+		wr_cio_state(dev, head, regp, NV_CIO_CRE_59);
+		wr_cio_state(dev, head, regp, NV_CIO_CRE_5B);
+
+		wr_cio_state(dev, head, regp, NV_CIO_CRE_85);
+		wr_cio_state(dev, head, regp, NV_CIO_CRE_86);
+	}
+
+	NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);
+
+	/* Writing 1 here enables an interrupt for every vblank period;
+	 * we leave vblank interrupts disabled.
+	 */
+	NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, 0);
+	NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK);
+}
+
+static void
+nv_save_state_palette(struct drm_device *dev, int head,
+		      struct nv04_mode_state *state)
+{
+	int head_offset = head * NV_PRMDIO_SIZE, i;
+
+	nv_wr08(dev, NV_PRMDIO_PIXEL_MASK + head_offset,
+				NV_PRMDIO_PIXEL_MASK_MASK);
+	nv_wr08(dev, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0);
+
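+	/* 256 palette entries x 3 bytes (R, G, B) = 768 */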
+	for (i = 0; i < 768; i++) {
+		state->crtc_reg[head].DAC[i] = nv_rd08(dev,
+				NV_PRMDIO_PALETTE_DATA + head_offset);
+	}
+
+	NVSetEnablePalette(dev, head, false);
+}
+
+void
+nouveau_hw_load_state_palette(struct drm_device *dev, int head,
+			      struct nv04_mode_state *state)
+{
+	int head_offset = head * NV_PRMDIO_SIZE, i;
+
+	nv_wr08(dev, NV_PRMDIO_PIXEL_MASK + head_offset,
+				NV_PRMDIO_PIXEL_MASK_MASK);
+	nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0);
+
+	for (i = 0; i < 768; i++) {
+		nv_wr08(dev, NV_PRMDIO_PALETTE_DATA + head_offset,
+				state->crtc_reg[head].DAC[i]);
+	}
+
+	NVSetEnablePalette(dev, head, false);
+}
+
+void nouveau_hw_save_state(struct drm_device *dev, int head,
+			   struct nv04_mode_state *state)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->chipset == 0x11)
+		/* NB: no attempt is made to restore the bad pll later on */
+		nouveau_hw_fix_bad_vpll(dev, head);
+	nv_save_state_ramdac(dev, head, state);
+	nv_save_state_vga(dev, head, state);
+	nv_save_state_palette(dev, head, state);
+	nv_save_state_ext(dev, head, state);
+}
+
+void nouveau_hw_load_state(struct drm_device *dev, int head,
+			   struct nv04_mode_state *state)
+{
+	NVVgaProtect(dev, head, true);
+	nv_load_state_ramdac(dev, head, state);
+	nv_load_state_ext(dev, head, state);
+	nouveau_hw_load_state_palette(dev, head, state);
+	nv_load_state_vga(dev, head, state);
+	NVVgaProtect(dev, head, false);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.h b/drivers/gpu/drm/nouveau/nouveau_hw.h
new file mode 100644
index 0000000..869130f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.h
@@ -0,0 +1,455 @@
+/*
+ * Copyright 2008 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_HW_H__
+#define __NOUVEAU_HW_H__
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+
+#define MASK(field) ( \
+	(0xffffffff >> (31 - ((1 ? field) - (0 ? field)))) << (0 ? field))
+
+#define XLATE(src, srclowbit, outfield) ( \
+	(((src) >> (srclowbit)) << (0 ? outfield)) & MASK(outfield))
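+/* 'field' is expected to expand to 'highbit : lowbit'; (1 ? field) then
+ * evaluates to highbit and (0 ? field) to lowbit.  E.g. for a field of
+ * 10:8, MASK() yields 0x700 and XLATE(v, 0, 10:8) moves bits 2:0 of v
+ * into bits 10:8.
+ */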
+
+void NVWriteVgaSeq(struct drm_device *, int head, uint8_t index, uint8_t value);
+uint8_t NVReadVgaSeq(struct drm_device *, int head, uint8_t index);
+void NVWriteVgaGr(struct drm_device *, int head, uint8_t index, uint8_t value);
+uint8_t NVReadVgaGr(struct drm_device *, int head, uint8_t index);
+void NVSetOwner(struct drm_device *, int owner);
+void NVBlankScreen(struct drm_device *, int head, bool blank);
+void nouveau_hw_setpll(struct drm_device *, uint32_t reg1,
+		       struct nouveau_pll_vals *pv);
+int nouveau_hw_get_pllvals(struct drm_device *, enum pll_types plltype,
+			   struct nouveau_pll_vals *pllvals);
+int nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pllvals);
+int nouveau_hw_get_clock(struct drm_device *, enum pll_types plltype);
+void nouveau_hw_save_vga_fonts(struct drm_device *, bool save);
+void nouveau_hw_save_state(struct drm_device *, int head,
+			   struct nv04_mode_state *state);
+void nouveau_hw_load_state(struct drm_device *, int head,
+			   struct nv04_mode_state *state);
+void nouveau_hw_load_state_palette(struct drm_device *, int head,
+				   struct nv04_mode_state *state);
+
+/* nouveau_calc.c */
+extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp,
+			     int *burst, int *lwm);
+extern int nouveau_calc_pll_mnp(struct drm_device *, struct pll_lims *pll_lim,
+				int clk, struct nouveau_pll_vals *pv);
+
+static inline uint32_t
+nvReadMC(struct drm_device *dev, uint32_t reg)
+{
+	uint32_t val = nv_rd32(dev, reg);
+	NV_REG_DEBUG(MC, dev, "reg %08x val %08x\n", reg, val);
+	return val;
+}
+
+static inline void
+nvWriteMC(struct drm_device *dev, uint32_t reg, uint32_t val)
+{
+	NV_REG_DEBUG(MC, dev, "reg %08x val %08x\n", reg, val);
+	nv_wr32(dev, reg, val);
+}
+
+static inline uint32_t
+nvReadVIDEO(struct drm_device *dev, uint32_t reg)
+{
+	uint32_t val = nv_rd32(dev, reg);
+	NV_REG_DEBUG(VIDEO, dev, "reg %08x val %08x\n", reg, val);
+	return val;
+}
+
+static inline void
+nvWriteVIDEO(struct drm_device *dev, uint32_t reg, uint32_t val)
+{
+	NV_REG_DEBUG(VIDEO, dev, "reg %08x val %08x\n", reg, val);
+	nv_wr32(dev, reg, val);
+}
+
+static inline uint32_t
+nvReadFB(struct drm_device *dev, uint32_t reg)
+{
+	uint32_t val = nv_rd32(dev, reg);
+	NV_REG_DEBUG(FB, dev, "reg %08x val %08x\n", reg, val);
+	return val;
+}
+
+static inline void
+nvWriteFB(struct drm_device *dev, uint32_t reg, uint32_t val)
+{
+	NV_REG_DEBUG(FB, dev, "reg %08x val %08x\n", reg, val);
+	nv_wr32(dev, reg, val);
+}
+
+static inline uint32_t
+nvReadEXTDEV(struct drm_device *dev, uint32_t reg)
+{
+	uint32_t val = nv_rd32(dev, reg);
+	NV_REG_DEBUG(EXTDEV, dev, "reg %08x val %08x\n", reg, val);
+	return val;
+}
+
+static inline void
+nvWriteEXTDEV(struct drm_device *dev, uint32_t reg, uint32_t val)
+{
+	NV_REG_DEBUG(EXTDEV, dev, "reg %08x val %08x\n", reg, val);
+	nv_wr32(dev, reg, val);
+}
+
+static inline uint32_t NVReadCRTC(struct drm_device *dev,
+					int head, uint32_t reg)
+{
+	uint32_t val;
+	if (head)
+		reg += NV_PCRTC0_SIZE;
+	val = nv_rd32(dev, reg);
+	NV_REG_DEBUG(CRTC, dev, "head %d reg %08x val %08x\n", head, reg, val);
+	return val;
+}
+
+static inline void NVWriteCRTC(struct drm_device *dev,
+					int head, uint32_t reg, uint32_t val)
+{
+	if (head)
+		reg += NV_PCRTC0_SIZE;
+	NV_REG_DEBUG(CRTC, dev, "head %d reg %08x val %08x\n", head, reg, val);
+	nv_wr32(dev, reg, val);
+}
+
+static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
+					int head, uint32_t reg)
+{
+	uint32_t val;
+	if (head)
+		reg += NV_PRAMDAC0_SIZE;
+	val = nv_rd32(dev, reg);
+	NV_REG_DEBUG(RAMDAC, dev, "head %d reg %08x val %08x\n",
+							head, reg, val);
+	return val;
+}
+
+static inline void NVWriteRAMDAC(struct drm_device *dev,
+					int head, uint32_t reg, uint32_t val)
+{
+	if (head)
+		reg += NV_PRAMDAC0_SIZE;
+	NV_REG_DEBUG(RAMDAC, dev, "head %d reg %08x val %08x\n",
+							head, reg, val);
+	nv_wr32(dev, reg, val);
+}
+
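+/* indirect TMDS register access: the address goes into FP_TMDS_CONTROL and
+ * the payload into FP_TMDS_DATA; reads assert WRITE_DISABLE so that setting
+ * the address doesn't clobber the register's contents
+ */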
+static inline uint8_t nv_read_tmds(struct drm_device *dev,
+					int or, int dl, uint8_t address)
+{
+	int ramdac = (or & OUTPUT_C) >> 2;
+
+	NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8,
+	NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | address);
+	return NVReadRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA + dl * 8);
+}
+
+static inline void nv_write_tmds(struct drm_device *dev,
+					int or, int dl, uint8_t address,
+					uint8_t data)
+{
+	int ramdac = (or & OUTPUT_C) >> 2;
+
+	NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA + dl * 8, data);
+	NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8, address);
+}
+
+static inline void NVWriteVgaCrtc(struct drm_device *dev,
+					int head, uint8_t index, uint8_t value)
+{
+	NV_REG_DEBUG(VGACRTC, dev, "head %d index 0x%02x data 0x%02x\n",
+							head, index, value);
+	nv_wr08(dev, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
+	nv_wr08(dev, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
+}
+
+static inline uint8_t NVReadVgaCrtc(struct drm_device *dev,
+					int head, uint8_t index)
+{
+	uint8_t val;
+	nv_wr08(dev, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
+	val = nv_rd08(dev, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE);
+	NV_REG_DEBUG(VGACRTC, dev, "head %d index 0x%02x data 0x%02x\n",
+							head, index, val);
+	return val;
+}
+
+/* CR57 and CR58 are a fun pair of regs. CR57 provides an index (0-0xf) into
+ * CR58.  I suspect they in fact do nothing, but are merely a way to carry
+ * useful per-head variables around
+ *
+ * Known uses:
+ * CR57		CR58
+ * 0x00		index to the appropriate dcb entry (or 7f for inactive)
+ * 0x02		dcb entry's "or" value (or 00 for inactive)
+ * 0x03		bit0 set for dual link (LVDS, possibly elsewhere too)
+ * 0x08 or 0x09	pxclk in MHz
+ * 0x0f		laptop panel info -	low nibble for PEXTDEV_BOOT_0 strap
+ * 					high nibble for xlat strap value
+ */
+
+static inline void
+NVWriteVgaCrtc5758(struct drm_device *dev, int head, uint8_t index, uint8_t value)
+{
+	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_57, index);
+	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_58, value);
+}
+
+static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_t index)
+{
+	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_57, index);
+	return NVReadVgaCrtc(dev, head, NV_CIO_CRE_58);
+}
+
+static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
+					int head, uint32_t reg)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint8_t val;
+
+	/* Only NV4x have two pvio ranges; other twoHeads cards MUST call
+	 * NVSetOwner for the relevant head to be programmed */
+	if (head && dev_priv->card_type == NV_40)
+		reg += NV_PRMVIO_SIZE;
+
+	val = nv_rd08(dev, reg);
+	NV_REG_DEBUG(RMVIO, dev, "head %d reg %08x val %02x\n", head, reg, val);
+	return val;
+}
+
+static inline void NVWritePRMVIO(struct drm_device *dev,
+					int head, uint32_t reg, uint8_t value)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	/* Only NV4x have two pvio ranges; other twoHeads cards MUST call
+	 * NVSetOwner for the relevant head to be programmed */
+	if (head && dev_priv->card_type == NV_40)
+		reg += NV_PRMVIO_SIZE;
+
+	NV_REG_DEBUG(RMVIO, dev, "head %d reg %08x val %02x\n",
+						head, reg, value);
+	nv_wr08(dev, reg, value);
+}
+
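+/* reading INP0 (0x3da) resets the attribute controller's index/data
+ * flip-flop; bit 5 of the value written to ARX selects whether the host
+ * (0) or the display (0x20) has access to the palette
+ */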
+static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable)
+{
+	nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+	nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20);
+}
+
+static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
+{
+	nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+	return !(nv_rd08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20);
+}
+
+static inline void NVWriteVgaAttr(struct drm_device *dev,
+					int head, uint8_t index, uint8_t value)
+{
+	if (NVGetEnablePalette(dev, head))
+		index &= ~0x20;
+	else
+		index |= 0x20;
+
+	nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+	NV_REG_DEBUG(VGAATTR, dev, "head %d index 0x%02x data 0x%02x\n",
+							head, index, value);
+	nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
+	nv_wr08(dev, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value);
+}
+
+static inline uint8_t NVReadVgaAttr(struct drm_device *dev,
+					int head, uint8_t index)
+{
+	uint8_t val;
+	if (NVGetEnablePalette(dev, head))
+		index &= ~0x20;
+	else
+		index |= 0x20;
+
+	nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+	nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
+	val = nv_rd08(dev, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE);
+	NV_REG_DEBUG(VGAATTR, dev, "head %d index 0x%02x data 0x%02x\n",
+							head, index, val);
+	return val;
+}
+
+static inline void NVVgaSeqReset(struct drm_device *dev, int head, bool start)
+{
+	NVWriteVgaSeq(dev, head, NV_VIO_SR_RESET_INDEX, start ? 0x1 : 0x3);
+}
+
+static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect)
+{
+	uint8_t seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX);
+
+	if (protect) {
+		NVVgaSeqReset(dev, head, true);
+		NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20);
+	} else {
+		/* Reenable sequencer, then turn on screen */
+		NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20);   /* reenable display */
+		NVVgaSeqReset(dev, head, false);
+	}
+	NVSetEnablePalette(dev, head, protect);
+}
+
+static inline bool
+nv_heads_tied(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->chipset == 0x11)
+		return !!(nvReadMC(dev, NV_PBUS_DEBUG_1) & (1 << 28));
+
+	return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4;
+}
+
+/* makes cr0-7 on the specified head read-only */
+static inline bool
+nv_lock_vga_crtc_base(struct drm_device *dev, int head, bool lock)
+{
+	uint8_t cr11 = NVReadVgaCrtc(dev, head, NV_CIO_CR_VRE_INDEX);
+	bool waslocked = cr11 & 0x80;
+
+	if (lock)
+		cr11 |= 0x80;
+	else
+		cr11 &= ~0x80;
+	NVWriteVgaCrtc(dev, head, NV_CIO_CR_VRE_INDEX, cr11);
+
+	return waslocked;
+}
+
+static inline void
+nv_lock_vga_crtc_shadow(struct drm_device *dev, int head, int lock)
+{
+	/* shadow lock: connects 0x60?3d? regs to "real" 0x3d? regs
+	 * bit7: unlocks HDT, HBS, HBE, HRS, HRE, HEB
+	 * bit6: seems to have some effect on CR09 (double scan, VBS_9)
+	 * bit5: unlocks HDE
+	 * bit4: unlocks VDE
+	 * bit3: unlocks VDT, OVL, VRS, ?VRE?, VBS, VBE, LSR, EBR
+	 * bit2: same as bit 1 of 0x60?804
+	 * bit0: same as bit 0 of 0x60?804
+	 */
+
+	uint8_t cr21 = lock;
+
+	if (lock < 0)
+		/* 0xfa is generic "unlock all" mask */
+		cr21 = NVReadVgaCrtc(dev, head, NV_CIO_CRE_21) | 0xfa;
+
+	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_21, cr21);
+}
+
+/* renders the extended crtc regs (cr19+) on all crtcs impervious:
+ * immutable and unreadable
+ */
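+/* Typical usage (sketch): unlock, program the extended regs, then restore
+ * the previous state:
+ *
+ *	bool waslocked = NVLockVgaCrtcs(dev, false);
+ *	...program the extended CRTC regs...
+ *	if (waslocked)
+ *		NVLockVgaCrtcs(dev, true);
+ */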
+static inline bool
+NVLockVgaCrtcs(struct drm_device *dev, bool lock)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	bool waslocked = !NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX);
+
+	NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX,
+		       lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE);
+	/* NV11 has independently lockable extended crtcs, except when tied */
+	if (dev_priv->chipset == 0x11 && !nv_heads_tied(dev))
+		NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX,
+			       lock ? NV_CIO_SR_LOCK_VALUE :
+				      NV_CIO_SR_UNLOCK_RW_VALUE);
+
+	return waslocked;
+}
+
+/* nv04 cursor max dimensions of 32x32 (A1R5G5B5) */
+#define NV04_CURSOR_SIZE 32
+/* limit nv10 cursors to 64x64 (ARGB8) (we could go to 64x255) */
+#define NV10_CURSOR_SIZE 64
+
+static inline int nv_cursor_width(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	return dev_priv->card_type >= NV_10 ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE;
+}
+
+static inline void
+nv_fix_nv40_hw_cursor(struct drm_device *dev, int head)
+{
+	/* on some nv40 (such as the "true" (in the NV_PFB_BOOT_0 sense) nv40,
+	 * the gf6800gt) a hardware bug requires a write to PRAMDAC_CURSOR_POS
+	 * for changes to the CRTC CURCTL regs to take effect, whether changing
+	 * the pixmap location, or just showing/hiding the cursor
+	 */
+	uint32_t curpos = NVReadRAMDAC(dev, head, NV_PRAMDAC_CU_START_POS);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_CU_START_POS, curpos);
+}
+
+static inline void
+nv_show_cursor(struct drm_device *dev, int head, bool show)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint8_t *curctl1 =
+		&dev_priv->mode_reg.crtc_reg[head].CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX];
+
+	if (show)
+		*curctl1 |= MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
+	else
+		*curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
+	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1);
+
+	if (dev_priv->card_type == NV_40)
+		nv_fix_nv40_hw_cursor(dev, head);
+}
+
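+/* Worked example (sketch): on NV_04 at bpp = 32, mask = 128 / 32 - 1 = 3,
+ * so a width of 1021 pixels rounds up to (1021 + 3) & ~3 = 1024.
+ */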
+static inline uint32_t
+nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int mask;
+
+	if (bpp == 15)
+		bpp = 16;
+	if (bpp == 24)
+		bpp = 8;
+
+	/* Alignment requirements taken from the Haiku driver */
+	if (dev_priv->card_type == NV_04)
+		mask = 128 / bpp - 1;
+	else
+		mask = 512 / bpp - 1;
+
+	return (width + mask) & ~mask;
+}
+
+#endif	/* __NOUVEAU_HW_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
new file mode 100644
index 0000000..70e994d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_i2c.h"
+#include "nouveau_hw.h"
+
+static void
+nv04_i2c_setscl(void *data, int state)
+{
+	struct nouveau_i2c_chan *i2c = data;
+	struct drm_device *dev = i2c->dev;
+	uint8_t val;
+
+	val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
+	NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
+}
+
+static void
+nv04_i2c_setsda(void *data, int state)
+{
+	struct nouveau_i2c_chan *i2c = data;
+	struct drm_device *dev = i2c->dev;
+	uint8_t val;
+
+	val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
+	NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
+}
+
+static int
+nv04_i2c_getscl(void *data)
+{
+	struct nouveau_i2c_chan *i2c = data;
+	struct drm_device *dev = i2c->dev;
+
+	return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 4);
+}
+
+static int
+nv04_i2c_getsda(void *data)
+{
+	struct nouveau_i2c_chan *i2c = data;
+	struct drm_device *dev = i2c->dev;
+
+	return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 8);
+}
+
+static void
+nv4e_i2c_setscl(void *data, int state)
+{
+	struct nouveau_i2c_chan *i2c = data;
+	struct drm_device *dev = i2c->dev;
+	uint8_t val;
+
+	val = (nv_rd32(dev, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
+	nv_wr32(dev, i2c->wr, val | 0x01);
+}
+
+static void
+nv4e_i2c_setsda(void *data, int state)
+{
+	struct nouveau_i2c_chan *i2c = data;
+	struct drm_device *dev = i2c->dev;
+	uint8_t val;
+
+	val = (nv_rd32(dev, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
+	nv_wr32(dev, i2c->wr, val | 0x01);
+}
+
+static int
+nv4e_i2c_getscl(void *data)
+{
+	struct nouveau_i2c_chan *i2c = data;
+	struct drm_device *dev = i2c->dev;
+
+	return !!((nv_rd32(dev, i2c->rd) >> 16) & 4);
+}
+
+static int
+nv4e_i2c_getsda(void *data)
+{
+	struct nouveau_i2c_chan *i2c = data;
+	struct drm_device *dev = i2c->dev;
+
+	return !!((nv_rd32(dev, i2c->rd) >> 16) & 8);
+}
+
+static int
+nv50_i2c_getscl(void *data)
+{
+	struct nouveau_i2c_chan *i2c = data;
+	struct drm_device *dev = i2c->dev;
+
+	return !!(nv_rd32(dev, i2c->rd) & 1);
+}
+
+static int
+nv50_i2c_getsda(void *data)
+{
+	struct nouveau_i2c_chan *i2c = data;
+	struct drm_device *dev = i2c->dev;
+
+	return !!(nv_rd32(dev, i2c->rd) & 2);
+}
+
+static void
+nv50_i2c_setscl(void *data, int state)
+{
+	struct nouveau_i2c_chan *i2c = data;
+	struct drm_device *dev = i2c->dev;
+
+	nv_wr32(dev, i2c->wr, 4 | (i2c->data ? 2 : 0) | (state ? 1 : 0));
+}
+
+static void
+nv50_i2c_setsda(void *data, int state)
+{
+	struct nouveau_i2c_chan *i2c = data;
+	struct drm_device *dev = i2c->dev;
+
+	nv_wr32(dev, i2c->wr,
+			(nv_rd32(dev, i2c->rd) & 1) | 4 | (state ? 2 : 0));
+	i2c->data = state;
+}
+
+static const uint32_t nv50_i2c_port[] = {
+	0x00e138, 0x00e150, 0x00e168, 0x00e180,
+	0x00e254, 0x00e274, 0x00e764, 0x00e780,
+	0x00e79c, 0x00e7b8
+};
+#define NV50_I2C_PORTS ARRAY_SIZE(nv50_i2c_port)
+
+int
+nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_i2c_chan *i2c;
+	int ret;
+
+	if (entry->chan)
+		return -EEXIST;
+
+	if (dev_priv->card_type == NV_50 && entry->read >= NV50_I2C_PORTS) {
+		NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
+		return -EINVAL;
+	}
+
+	i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
+	if (i2c == NULL)
+		return -ENOMEM;
+
+	switch (entry->port_type) {
+	case 0:
+		i2c->algo.bit.setsda = nv04_i2c_setsda;
+		i2c->algo.bit.setscl = nv04_i2c_setscl;
+		i2c->algo.bit.getsda = nv04_i2c_getsda;
+		i2c->algo.bit.getscl = nv04_i2c_getscl;
+		i2c->rd = entry->read;
+		i2c->wr = entry->write;
+		break;
+	case 4:
+		i2c->algo.bit.setsda = nv4e_i2c_setsda;
+		i2c->algo.bit.setscl = nv4e_i2c_setscl;
+		i2c->algo.bit.getsda = nv4e_i2c_getsda;
+		i2c->algo.bit.getscl = nv4e_i2c_getscl;
+		i2c->rd = 0x600800 + entry->read;
+		i2c->wr = 0x600800 + entry->write;
+		break;
+	case 5:
+		i2c->algo.bit.setsda = nv50_i2c_setsda;
+		i2c->algo.bit.setscl = nv50_i2c_setscl;
+		i2c->algo.bit.getsda = nv50_i2c_getsda;
+		i2c->algo.bit.getscl = nv50_i2c_getscl;
+		i2c->rd = nv50_i2c_port[entry->read];
+		i2c->wr = i2c->rd;
+		break;
+	case 6:
+		i2c->rd = entry->read;
+		i2c->wr = entry->write;
+		break;
+	default:
+		NV_ERROR(dev, "DCB I2C port type %d unknown\n",
+			 entry->port_type);
+		kfree(i2c);
+		return -EINVAL;
+	}
+
+	snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+		 "nouveau-%s-%d", pci_name(dev->pdev), index);
+	i2c->adapter.owner = THIS_MODULE;
+	i2c->adapter.dev.parent = &dev->pdev->dev;
+	i2c->dev = dev;
+	i2c_set_adapdata(&i2c->adapter, i2c);
+
+	if (entry->port_type < 6) {
+		i2c->adapter.algo_data = &i2c->algo.bit;
+		i2c->algo.bit.udelay = 40;
+		i2c->algo.bit.timeout = usecs_to_jiffies(5000);
+		i2c->algo.bit.data = i2c;
+		ret = i2c_bit_add_bus(&i2c->adapter);
+	} else {
+		i2c->adapter.algo_data = &i2c->algo.dp;
+		i2c->algo.dp.running = false;
+		i2c->algo.dp.address = 0;
+		i2c->algo.dp.aux_ch = nouveau_dp_i2c_aux_ch;
+		ret = i2c_dp_aux_add_bus(&i2c->adapter);
+	}
+
+	if (ret) {
+		NV_ERROR(dev, "Failed to register i2c %d\n", index);
+		kfree(i2c);
+		return ret;
+	}
+
+	entry->chan = i2c;
+	return 0;
+}
+
+void
+nouveau_i2c_fini(struct drm_device *dev, struct dcb_i2c_entry *entry)
+{
+	if (!entry->chan)
+		return;
+
+	i2c_del_adapter(&entry->chan->adapter);
+	kfree(entry->chan);
+	entry->chan = NULL;
+}
+
+struct nouveau_i2c_chan *
+nouveau_i2c_find(struct drm_device *dev, int index)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->VBIOS;
+
+	if (index >= DCB_MAX_NUM_I2C_ENTRIES)
+		return NULL;
+
+	if (!bios->bdcb.dcb.i2c[index].chan) {
+		if (nouveau_i2c_init(dev, &bios->bdcb.dcb.i2c[index], index))
+			return NULL;
+	}
+
+	return bios->bdcb.dcb.i2c[index].chan;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h
new file mode 100644
index 0000000..c8eaf7a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_I2C_H__
+#define __NOUVEAU_I2C_H__
+
+#include <linux/i2c.h>
+#include <linux/i2c-id.h>
+#include <linux/i2c-algo-bit.h>
+#include "drm_dp_helper.h"
+
+struct dcb_i2c_entry;
+
+struct nouveau_i2c_chan {
+	struct i2c_adapter adapter;
+	struct drm_device *dev;
+	union {
+		struct i2c_algo_bit_data bit;
+		struct i2c_algo_dp_aux_data dp;
+	} algo;
+	unsigned rd;
+	unsigned wr;
+	unsigned data;
+};
+
+int nouveau_i2c_init(struct drm_device *, struct dcb_i2c_entry *, int index);
+void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *);
+struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index);
+
+int nouveau_dp_i2c_aux_ch(struct i2c_adapter *, int mode, uint8_t write_byte,
+			  uint8_t *read_byte);
+
+#endif /* __NOUVEAU_I2C_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
new file mode 100644
index 0000000..a2c30f4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
@@ -0,0 +1,72 @@
+/**
+ * \file nouveau_ioc32.c
+ *
+ * 32-bit ioctl compatibility routines for the nouveau DRM.
+ *
+ * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
+ *
+ *
+ * Copyright (C) Paul Mackerras 2005
+ * Copyright (C) Egbert Eich 2003,2004
+ * Copyright (C) Dave Airlie 2005
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/compat.h>
+
+#include "drmP.h"
+#include "drm.h"
+
+#include "nouveau_drv.h"
+
+/**
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/dri/card<n>.
+ *
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ */
+long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
+			 unsigned long arg)
+{
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+	drm_ioctl_compat_t *fn = NULL;
+	int ret;
+
+	if (nr < DRM_COMMAND_BASE)
+		return drm_compat_ioctl(filp, cmd, arg);
+
+#if 0
+	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(nouveau_compat_ioctls))
+		fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE];
+#endif
+	lock_kernel();	  /* XXX for now */
+	if (fn != NULL)
+		ret = (*fn)(filp, cmd, arg);
+	else
+		ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
+	unlock_kernel();
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
new file mode 100644
index 0000000..370c72c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -0,0 +1,702 @@
+/*
+ * Copyright (C) 2006 Ben Skeggs.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ *   Ben Skeggs <darktama@iinet.net.au>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_reg.h"
+#include <linux/ratelimit.h>
+
+/* needed for hotplug irq */
+#include "nouveau_connector.h"
+#include "nv50_display.h"
+
+void
+nouveau_irq_preinstall(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	/* Master disable */
+	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
+
+	if (dev_priv->card_type == NV_50) {
+		INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
+		INIT_LIST_HEAD(&dev_priv->vbl_waiting);
+	}
+}
+
+int
+nouveau_irq_postinstall(struct drm_device *dev)
+{
+	/* Master enable */
+	nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
+	return 0;
+}
+
+void
+nouveau_irq_uninstall(struct drm_device *dev)
+{
+	/* Master disable */
+	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
+}
+
+static int
+nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct nouveau_pgraph_object_method *grm;
+	struct nouveau_pgraph_object_class *grc;
+
+	grc = dev_priv->engine.graph.grclass;
+	while (grc->id) {
+		if (grc->id == class)
+			break;
+		grc++;
+	}
+
+	if (grc->id != class || !grc->methods)
+		return -ENOENT;
+
+	grm = grc->methods;
+	while (grm->id) {
+		if (grm->id == mthd)
+			return grm->exec(chan, class, mthd, data);
+		grm++;
+	}
+
+	return -ENOENT;
+}
+
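+/* Example (sketch): a trapped FIFO address of 0x00006000 decodes to
+ * subchannel (0x6000 >> 13) & 7 = 3 and method 0x6000 & 0x1ffc = 0x0000,
+ * i.e. an object bind on subchannel 3.
+ */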
+static bool
+nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
+{
+	struct drm_device *dev = chan->dev;
+	const int subc = (addr >> 13) & 0x7;
+	const int mthd = addr & 0x1ffc;
+
+	if (mthd == 0x0000) {
+		struct nouveau_gpuobj_ref *ref = NULL;
+
+		if (nouveau_gpuobj_ref_find(chan, data, &ref))
+			return false;
+
+		if (ref->gpuobj->engine != NVOBJ_ENGINE_SW)
+			return false;
+
+		chan->sw_subchannel[subc] = ref->gpuobj->class;
+		nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
+			NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
+		return true;
+	}
+
+	/* hw object */
+	if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc*4)))
+		return false;
+
+	if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data))
+		return false;
+
+	return true;
+}
+
+static void
+nouveau_fifo_irq_handler(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_engine *engine = &dev_priv->engine;
+	uint32_t status, reassign;
+	int cnt = 0;
+
+	reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
+	while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
+		struct nouveau_channel *chan = NULL;
+		uint32_t chid, get;
+
+		nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+
+		chid = engine->fifo.channel_id(dev);
+		if (chid >= 0 && chid < engine->fifo.channels)
+			chan = dev_priv->fifos[chid];
+		get  = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
+
+		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
+			uint32_t mthd, data;
+			int ptr;
+
+			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
+			 * wrapping on my G80 chips, but CACHE1 isn't big
+			 * enough for this much data.  Tests show that it
+			 * wraps around to the start at GET=0x800.  No clue
+			 * as to why.
+			 */
+			ptr = (get & 0x7ff) >> 2;
+
+			if (dev_priv->card_type < NV_40) {
+				mthd = nv_rd32(dev,
+					NV04_PFIFO_CACHE1_METHOD(ptr));
+				data = nv_rd32(dev,
+					NV04_PFIFO_CACHE1_DATA(ptr));
+			} else {
+				mthd = nv_rd32(dev,
+					NV40_PFIFO_CACHE1_METHOD(ptr));
+				data = nv_rd32(dev,
+					NV40_PFIFO_CACHE1_DATA(ptr));
+			}
+
+			if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) {
+				NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
+					     "Mthd 0x%04x Data 0x%08x\n",
+					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
+					data);
+			}
+
+			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+			nv_wr32(dev, NV03_PFIFO_INTR_0,
+						NV_PFIFO_INTR_CACHE_ERROR);
+
+			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
+				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
+				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
+			nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
+
+			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
+				nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+
+			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
+		}
+
+		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
+			NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d\n", chid);
+
+			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
+			nv_wr32(dev, NV03_PFIFO_INTR_0,
+						NV_PFIFO_INTR_DMA_PUSHER);
+
+			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
+			if (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT) != get)
+				nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET,
+								get + 4);
+		}
+
+		if (status) {
+			NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
+				status, chid);
+			nv_wr32(dev, NV03_PFIFO_INTR_0, status);
+			status = 0;
+		}
+
+		nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
+	}
+
+	if (status) {
+		NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
+		nv_wr32(dev, 0x2140, 0);
+		nv_wr32(dev, 0x140, 0);
+	}
+
+	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
+}
+
+struct nouveau_bitfield_names {
+	uint32_t mask;
+	const char *name;
+};
+
+static struct nouveau_bitfield_names nstatus_names[] =
+{
+	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
+	{ NV04_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
+	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
+	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" }
+};
+
+static struct nouveau_bitfield_names nstatus_names_nv10[] =
+{
+	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
+	{ NV10_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
+	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
+	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" }
+};
+
+static struct nouveau_bitfield_names nsource_names[] =
+{
+	{ NV03_PGRAPH_NSOURCE_NOTIFICATION,       "NOTIFICATION" },
+	{ NV03_PGRAPH_NSOURCE_DATA_ERROR,         "DATA_ERROR" },
+	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR,   "PROTECTION_ERROR" },
+	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION,    "RANGE_EXCEPTION" },
+	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR,        "LIMIT_COLOR" },
+	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA,         "LIMIT_ZETA" },
+	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD,       "ILLEGAL_MTHD" },
+	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION,   "DMA_R_PROTECTION" },
+	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION,   "DMA_W_PROTECTION" },
+	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION,   "FORMAT_EXCEPTION" },
+	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION,    "PATCH_EXCEPTION" },
+	{ NV03_PGRAPH_NSOURCE_STATE_INVALID,      "STATE_INVALID" },
+	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY,      "DOUBLE_NOTIFY" },
+	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE,      "NOTIFY_IN_USE" },
+	{ NV03_PGRAPH_NSOURCE_METHOD_CNT,         "METHOD_CNT" },
+	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION,   "BFR_NOTIFICATION" },
+	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
+	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A,        "DMA_WIDTH_A" },
+	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B,        "DMA_WIDTH_B" },
+};
+
+static void
+nouveau_print_bitfield_names_(uint32_t value,
+				const struct nouveau_bitfield_names *namelist,
+				const int namelist_len)
+{
+	/*
+	 * Caller must have already printed the KERN_* log level for us.
+	 * Also the caller is responsible for adding the newline.
+	 */
+	int i;
+	for (i = 0; i < namelist_len; ++i) {
+		uint32_t mask = namelist[i].mask;
+		if (value & mask) {
+			printk(" %s", namelist[i].name);
+			value &= ~mask;
+		}
+	}
+	if (value)
+		printk(" (unknown bits 0x%08x)", value);
+}
+#define nouveau_print_bitfield_names(val, namelist) \
+	nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
+
+static int
+nouveau_graph_chid_from_grctx(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t inst;
+	int i;
+
+	if (dev_priv->card_type < NV_40)
+		return dev_priv->engine.fifo.channels;
+	else
+	if (dev_priv->card_type < NV_50) {
+		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;
+
+		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+			struct nouveau_channel *chan = dev_priv->fifos[i];
+
+			if (!chan || !chan->ramin_grctx)
+				continue;
+
+			if (inst == chan->ramin_grctx->instance)
+				break;
+		}
+	} else {
+		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;
+
+		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+			struct nouveau_channel *chan = dev_priv->fifos[i];
+
+			if (!chan || !chan->ramin)
+				continue;
+
+			if (inst == chan->ramin->instance)
+				break;
+		}
+	}
+
+	return i;
+}
+
+static int
+nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_engine *engine = &dev_priv->engine;
+	int channel;
+
+	if (dev_priv->card_type < NV_10)
+		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
+	else
+	if (dev_priv->card_type < NV_40)
+		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
+	else
+		channel = nouveau_graph_chid_from_grctx(dev);
+
+	if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
+		NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
+		return -EINVAL;
+	}
+
+	*channel_ret = channel;
+	return 0;
+}
+
+struct nouveau_pgraph_trap {
+	int channel;
+	int class;
+	int subc, mthd, size;
+	uint32_t data, data2;
+	uint32_t nsource, nstatus;
+};
+
+static void
+nouveau_graph_trap_info(struct drm_device *dev,
+			struct nouveau_pgraph_trap *trap)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t address;
+
+	trap->nsource = trap->nstatus = 0;
+	if (dev_priv->card_type < NV_50) {
+		trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+		trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+	}
+
+	if (nouveau_graph_trapped_channel(dev, &trap->channel))
+		trap->channel = -1;
+	address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+
+	trap->mthd = address & 0x1FFC;
+	trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+	if (dev_priv->card_type < NV_10) {
+		trap->subc  = (address >> 13) & 0x7;
+	} else {
+		trap->subc  = (address >> 16) & 0x7;
+		trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
+	}
+
+	if (dev_priv->card_type < NV_10)
+		trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF;
+	else if (dev_priv->card_type < NV_40)
+		trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF;
+	else if (dev_priv->card_type < NV_50)
+		trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF;
+	else
+		trap->class = nv_rd32(dev, 0x400814);
+}
+
+static void
+nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
+			     struct nouveau_pgraph_trap *trap)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
+
+	NV_INFO(dev, "%s - nSource:", id);
+	nouveau_print_bitfield_names(nsource, nsource_names);
+	printk(", nStatus:");
+	if (dev_priv->card_type < NV_10)
+		nouveau_print_bitfield_names(nstatus, nstatus_names);
+	else
+		nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
+	printk("\n");
+
+	NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
+					"Data 0x%08x:0x%08x\n",
+					id, trap->channel, trap->subc,
+					trap->class, trap->mthd,
+					trap->data2, trap->data);
+}
+
+static int
+nouveau_pgraph_intr_swmthd(struct drm_device *dev,
+			   struct nouveau_pgraph_trap *trap)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (trap->channel < 0 ||
+	    trap->channel >= dev_priv->engine.fifo.channels ||
+	    !dev_priv->fifos[trap->channel])
+		return -ENODEV;
+
+	return nouveau_call_method(dev_priv->fifos[trap->channel],
+				   trap->class, trap->mthd, trap->data);
+}
+
+static inline void
+nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
+{
+	struct nouveau_pgraph_trap trap;
+	int unhandled = 0;
+
+	nouveau_graph_trap_info(dev, &trap);
+
+	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+		if (nouveau_pgraph_intr_swmthd(dev, &trap))
+			unhandled = 1;
+	} else {
+		unhandled = 1;
+	}
+
+	if (unhandled)
+		nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
+}
+
+static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
+
+static int nouveau_ratelimit(void)
+{
+	return __ratelimit(&nouveau_ratelimit_state);
+}
+
+static inline void
+nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
+{
+	struct nouveau_pgraph_trap trap;
+	int unhandled = 0;
+
+	nouveau_graph_trap_info(dev, &trap);
+	trap.nsource = nsource;
+
+	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+		if (nouveau_pgraph_intr_swmthd(dev, &trap))
+			unhandled = 1;
+	} else {
+		unhandled = 1;
+	}
+
+	if (unhandled && nouveau_ratelimit())
+		nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
+}
+
+static inline void
+nouveau_pgraph_intr_context_switch(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_engine *engine = &dev_priv->engine;
+	uint32_t chid;
+
+	chid = engine->fifo.channel_id(dev);
+	NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);
+
+	switch (dev_priv->card_type) {
+	case NV_04:
+		nv04_graph_context_switch(dev);
+		break;
+	case NV_10:
+		nv10_graph_context_switch(dev);
+		break;
+	default:
+		NV_ERROR(dev, "Context switch not implemented\n");
+		break;
+	}
+}
+
+static void
+nouveau_pgraph_irq_handler(struct drm_device *dev)
+{
+	uint32_t status;
+
+	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+		uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+
+		if (status & NV_PGRAPH_INTR_NOTIFY) {
+			nouveau_pgraph_intr_notify(dev, nsource);
+
+			status &= ~NV_PGRAPH_INTR_NOTIFY;
+			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
+		}
+
+		if (status & NV_PGRAPH_INTR_ERROR) {
+			nouveau_pgraph_intr_error(dev, nsource);
+
+			status &= ~NV_PGRAPH_INTR_ERROR;
+			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
+		}
+
+		if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+			nouveau_pgraph_intr_context_switch(dev);
+
+			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+			nv_wr32(dev, NV03_PGRAPH_INTR,
+				 NV_PGRAPH_INTR_CONTEXT_SWITCH);
+		}
+
+		if (status) {
+			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
+			nv_wr32(dev, NV03_PGRAPH_INTR, status);
+		}
+
+		if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
+			nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
+	}
+
+	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
+}
+
+static void
+nv50_pgraph_irq_handler(struct drm_device *dev)
+{
+	uint32_t status, nsource;
+
+	status = nv_rd32(dev, NV03_PGRAPH_INTR);
+	nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+
+	if (status & 0x00000001) {
+		nouveau_pgraph_intr_notify(dev, nsource);
+		status &= ~0x00000001;
+		nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
+	}
+
+	if (status & 0x00000010) {
+		nouveau_pgraph_intr_error(dev, nsource |
+					  NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
+
+		status &= ~0x00000010;
+		nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
+	}
+
+	if (status & 0x00001000) {
+		nv_wr32(dev, 0x400500, 0x00000000);
+		nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+		nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
+			NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
+		nv_wr32(dev, 0x400500, 0x00010001);
+
+		nv50_graph_context_switch(dev);
+
+		status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+	}
+
+	if (status & 0x00100000) {
+		nouveau_pgraph_intr_error(dev, nsource |
+					  NV03_PGRAPH_NSOURCE_DATA_ERROR);
+
+		status &= ~0x00100000;
+		nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
+	}
+
+	if (status & 0x00200000) {
+		int r;
+
+		nouveau_pgraph_intr_error(dev, nsource |
+					  NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
+
+		NV_ERROR(dev, "magic set 1:\n");
+		for (r = 0x408900; r <= 0x408910; r += 4)
+			NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
+		nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000);
+		for (r = 0x408e08; r <= 0x408e24; r += 4)
+			NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
+		nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000);
+
+		NV_ERROR(dev, "magic set 2:\n");
+		for (r = 0x409900; r <= 0x409910; r += 4)
+			NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
+		nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000);
+		for (r = 0x409e08; r <= 0x409e24; r += 4)
+			NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
+		nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000);
+
+		status &= ~0x00200000;
+		nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
+		nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
+	}
+
+	if (status) {
+		NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
+		nv_wr32(dev, NV03_PGRAPH_INTR, status);
+	}
+
+	{
+		const int isb = (1 << 16) | (1 << 0);
+
+		if ((nv_rd32(dev, 0x400500) & isb) != isb)
+			nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb);
+	}
+
+	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
+}
+
+static void
+nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
+{
+	if (crtc & 1)
+		nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
+
+	if (crtc & 2)
+		nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
+}
+
+irqreturn_t
+nouveau_irq_handler(DRM_IRQ_ARGS)
+{
+	struct drm_device *dev = (struct drm_device *)arg;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t status, fbdev_flags = 0;
+
+	status = nv_rd32(dev, NV03_PMC_INTR_0);
+	if (!status)
+		return IRQ_NONE;
+
+	if (dev_priv->fbdev_info) {
+		fbdev_flags = dev_priv->fbdev_info->flags;
+		dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
+	}
+
+	if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
+		nouveau_fifo_irq_handler(dev);
+		status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
+	}
+
+	if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
+		if (dev_priv->card_type >= NV_50)
+			nv50_pgraph_irq_handler(dev);
+		else
+			nouveau_pgraph_irq_handler(dev);
+
+		status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
+	}
+
+	if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
+		nouveau_crtc_irq_handler(dev, (status>>24)&3);
+		status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
+	}
+
+	if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
+		      NV_PMC_INTR_0_NV50_I2C_PENDING)) {
+		nv50_display_irq_handler(dev);
+		status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
+			    NV_PMC_INTR_0_NV50_I2C_PENDING);
+	}
+
+	if (status)
+		NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);
+
+	if (dev_priv->fbdev_info)
+		dev_priv->fbdev_info->flags = fbdev_flags;
+
+	return IRQ_HANDLED;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
new file mode 100644
index 0000000..0275571
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -0,0 +1,568 @@
+/*
+ * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
+ * Copyright 2005 Stephane Marchesin
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_sarea.h"
+#include "nouveau_drv.h"
+
+static struct mem_block *
+split_block(struct mem_block *p, uint64_t start, uint64_t size,
+	    struct drm_file *file_priv)
+{
+	/* Maybe cut off the start of an existing block */
+	if (start > p->start) {
+		struct mem_block *newblock =
+			kmalloc(sizeof(*newblock), GFP_KERNEL);
+		if (!newblock)
+			goto out;
+		newblock->start = start;
+		newblock->size = p->size - (start - p->start);
+		newblock->file_priv = NULL;
+		newblock->next = p->next;
+		newblock->prev = p;
+		p->next->prev = newblock;
+		p->next = newblock;
+		p->size -= newblock->size;
+		p = newblock;
+	}
+
+	/* Maybe cut off the end of an existing block */
+	if (size < p->size) {
+		struct mem_block *newblock =
+			kmalloc(sizeof(*newblock), GFP_KERNEL);
+		if (!newblock)
+			goto out;
+		newblock->start = start + size;
+		newblock->size = p->size - size;
+		newblock->file_priv = NULL;
+		newblock->next = p->next;
+		newblock->prev = p;
+		p->next->prev = newblock;
+		p->next = newblock;
+		p->size = size;
+	}
+
+out:
+	/* Our block is in the middle */
+	p->file_priv = file_priv;
+	return p;
+}
+
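+/* Usage sketch (hypothetical values): carve a 4KiB block, aligned to
+ * 1 << 12 bytes, from the front of a heap and tag it with its owner:
+ *
+ *	struct mem_block *blk =
+ *		nouveau_mem_alloc_block(heap, 0x1000, 12, file_priv, 0);
+ *	if (!blk)
+ *		return -ENOMEM;
+ */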
+struct mem_block *
+nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size,
+			int align2, struct drm_file *file_priv, int tail)
+{
+	struct mem_block *p;
+	uint64_t mask = (1 << align2) - 1;
+
+	if (!heap)
+		return NULL;
+
+	if (tail) {
+		list_for_each_prev(p, heap) {
+			uint64_t start = ((p->start + p->size) - size) & ~mask;
+
+			if (p->file_priv == NULL && start >= p->start &&
+			    start + size <= p->start + p->size)
+				return split_block(p, start, size, file_priv);
+		}
+	} else {
+		list_for_each(p, heap) {
+			uint64_t start = (p->start + mask) & ~mask;
+
+			if (p->file_priv == NULL &&
+			    start + size <= p->start + p->size)
+				return split_block(p, start, size, file_priv);
+		}
+	}
+
+	return NULL;
+}
+
+void nouveau_mem_free_block(struct mem_block *p)
+{
+	p->file_priv = NULL;
+
+	/* Assumes a single contiguous range.  Needs a special file_priv in
+	 * 'heap' to stop it being subsumed.
+	 */
+	if (p->next->file_priv == NULL) {
+		struct mem_block *q = p->next;
+		p->size += q->size;
+		p->next = q->next;
+		p->next->prev = p;
+		kfree(q);
+	}
+
+	if (p->prev->file_priv == NULL) {
+		struct mem_block *q = p->prev;
+		q->size += p->size;
+		q->next = p->next;
+		q->next->prev = q;
+		kfree(p);
+	}
+}
+
+/* Initialize.  How to check for an uninitialized heap?
+ */
+int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start,
+			  uint64_t size)
+{
+	struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);
+
+	if (!blocks)
+		return -ENOMEM;
+
+	*heap = kmalloc(sizeof(**heap), GFP_KERNEL);
+	if (!*heap) {
+		kfree(blocks);
+		return -ENOMEM;
+	}
+
+	blocks->start = start;
+	blocks->size = size;
+	blocks->file_priv = NULL;
+	blocks->next = blocks->prev = *heap;
+
+	memset(*heap, 0, sizeof(**heap));
+	(*heap)->file_priv = (struct drm_file *) -1;
+	(*heap)->next = (*heap)->prev = blocks;
+	return 0;
+}
+
+/*
+ * Free all blocks associated with the releasing file_priv
+ */
+void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
+{
+	struct mem_block *p;
+
+	if (!heap || !heap->next)
+		return;
+
+	list_for_each(p, heap) {
+		if (p->file_priv == file_priv)
+			p->file_priv = NULL;
+	}
+
+	/* Assumes a single contiguous range.  Needs a special file_priv in
+	 * 'heap' to stop it being subsumed.
+	 */
+	list_for_each(p, heap) {
+		while ((p->file_priv == NULL) &&
+					(p->next->file_priv == NULL) &&
+					(p->next != heap)) {
+			struct mem_block *q = p->next;
+			p->size += q->size;
+			p->next = q->next;
+			p->next->prev = p;
+			kfree(q);
+		}
+	}
+}
+
+/*
+ * NV50 VM helpers
+ */
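+/* PTE arithmetic (sketch): each page table entry is two 32-bit words, so a
+ * GART mapping (psz = 12) at virtual offset 0x3000 into the aperture selects
+ * pgt[0x3000 >> 29] = pgt[0] and word index
+ * ((0x3000 & 0x1fffffffULL) >> 12) << 1 = 6.
+ */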
+int
+nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
+			uint32_t flags, uint64_t phys)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj **pgt;
+	unsigned psz, pfl, pages;
+
+	if (virt >= dev_priv->vm_gart_base &&
+	    (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) {
+		psz = 12;
+		pgt = &dev_priv->gart_info.sg_ctxdma;
+		pfl = 0x21;
+		virt -= dev_priv->vm_gart_base;
+	} else
+	if (virt >= dev_priv->vm_vram_base &&
+	    (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) {
+		psz = 16;
+		pgt = dev_priv->vm_vram_pt;
+		pfl = 0x01;
+		virt -= dev_priv->vm_vram_base;
+	} else {
+		NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n",
+			 virt, virt + size - 1);
+		return -EINVAL;
+	}
+
+	pages = size >> psz;
+
+	dev_priv->engine.instmem.prepare_access(dev, true);
+	if (flags & 0x80000000) {
+		while (pages--) {
+			struct nouveau_gpuobj *pt = pgt[virt >> 29];
+			unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
+
+			nv_wo32(dev, pt, pte++, 0x00000000);
+			nv_wo32(dev, pt, pte++, 0x00000000);
+
+			virt += (1 << psz);
+		}
+	} else {
+		while (pages--) {
+			struct nouveau_gpuobj *pt = pgt[virt >> 29];
+			unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
+			unsigned offset_h = upper_32_bits(phys) & 0xff;
+			unsigned offset_l = lower_32_bits(phys);
+
+			nv_wo32(dev, pt, pte++, offset_l | pfl);
+			nv_wo32(dev, pt, pte++, offset_h | flags);
+
+			phys += (1 << psz);
+			virt += (1 << psz);
+		}
+	}
+	dev_priv->engine.instmem.finish_access(dev);
+
+	nv_wr32(dev, 0x100c80, 0x00050001);
+	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+		return -EBUSY;
+	}
+
+	nv_wr32(dev, 0x100c80, 0x00000001);
+	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+void
+nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
+{
+	nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0);
+}
+
+/*
+ * Cleanup everything
+ */
+void nouveau_mem_takedown(struct mem_block **heap)
+{
+	struct mem_block *p;
+
+	if (!*heap)
+		return;
+
+	for (p = (*heap)->next; p != *heap;) {
+		struct mem_block *q = p;
+		p = p->next;
+		kfree(q);
+	}
+
+	kfree(*heap);
+	*heap = NULL;
+}
+
+void nouveau_mem_close(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->ttm.bdev.man[TTM_PL_PRIV0].has_type)
+		ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_PRIV0);
+	ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
+
+	ttm_bo_device_release(&dev_priv->ttm.bdev);
+
+	nouveau_ttm_global_release(dev_priv);
+
+	if (drm_core_has_AGP(dev) && dev->agp &&
+	    drm_core_check_feature(dev, DRIVER_MODESET)) {
+		struct drm_agp_mem *entry, *tempe;
+
+		/* Remove AGP resources, but leave dev->agp
+		   intact until drv_cleanup is called. */
+		list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
+			if (entry->bound)
+				drm_unbind_agp(entry->memory);
+			drm_free_agp(entry->memory, entry->pages);
+			kfree(entry);
+		}
+		INIT_LIST_HEAD(&dev->agp->memory);
+
+		if (dev->agp->acquired)
+			drm_agp_release(dev);
+
+		dev->agp->acquired = 0;
+		dev->agp->enabled = 0;
+	}
+
+	if (dev_priv->fb_mtrr) {
+		drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1),
+			     drm_get_resource_len(dev, 1), DRM_MTRR_WC);
+		dev_priv->fb_mtrr = 0;
+	}
+}
+
+/*XXX won't work on BSD because of pci_read_config_dword */
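+/* The nForce/nForce2 IGPs carve the framebuffer out of system memory, so the
+ * size is read back from the host bridge.  Decode sketch: on nForce, config
+ * dword 0x7C bits 10:6 hold (size in MiB - 1), e.g. a field value of 31
+ * means 32 MiB.
+ */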
+static uint32_t
+nouveau_mem_fb_amount_igp(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct pci_dev *bridge;
+	uint32_t mem;
+
+	bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
+	if (!bridge) {
+		NV_ERROR(dev, "no bridge device\n");
+		return 0;
+	}
+
+	if (dev_priv->flags&NV_NFORCE) {
+		pci_read_config_dword(bridge, 0x7C, &mem);
+		return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
+	} else
+	if (dev_priv->flags&NV_NFORCE2) {
+		pci_read_config_dword(bridge, 0x84, &mem);
+		return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
+	}
+
+	NV_ERROR(dev, "impossible!\n");
+	return 0;
+}
+
+/* returns the amount of FB ram in bytes */
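+/* Decode sketch for NV_04: if NV03_BOOT_0 bit 8 is set, the size comes from
+ * bits 15:12 as (field * 2 + 2) MiB; a field value of 3 gives 8 MiB. */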
+uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t boot0;
+
+	switch (dev_priv->card_type) {
+	case NV_04:
+		boot0 = nv_rd32(dev, NV03_BOOT_0);
+		if (boot0 & 0x00000100)
+			return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;
+
+		switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) {
+		case NV04_BOOT_0_RAM_AMOUNT_32MB:
+			return 32 * 1024 * 1024;
+		case NV04_BOOT_0_RAM_AMOUNT_16MB:
+			return 16 * 1024 * 1024;
+		case NV04_BOOT_0_RAM_AMOUNT_8MB:
+			return 8 * 1024 * 1024;
+		case NV04_BOOT_0_RAM_AMOUNT_4MB:
+			return 4 * 1024 * 1024;
+		}
+		break;
+	case NV_10:
+	case NV_20:
+	case NV_30:
+	case NV_40:
+	case NV_50:
+	default:
+		if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
+			return nouveau_mem_fb_amount_igp(dev);
+		} else {
+			uint64_t mem;
+			mem = (nv_rd32(dev, NV04_FIFO_DATA) &
+					NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >>
+					NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
+			return mem * 1024 * 1024;
+		}
+		break;
+	}
+
+	NV_ERROR(dev,
+		"Unable to detect video ram size. Please report your setup to "
+							DRIVER_EMAIL "\n");
+	return 0;
+}
+
+static void nouveau_mem_reset_agp(struct drm_device *dev)
+{
+	uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable;
+
+	saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);
+	saved_pci_nv_19 = nv_rd32(dev, NV04_PBUS_PCI_NV_19);
+
+	/* clear busmaster bit */
+	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
+	/* clear SBA and AGP bits */
+	nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff);
+
+	/* power cycle pgraph, if enabled */
+	pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
+	if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
+		nv_wr32(dev, NV03_PMC_ENABLE,
+				pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
+		nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
+				NV_PMC_ENABLE_PGRAPH);
+	}
+
+	/* and restore (gives effect of resetting AGP) */
+	nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19);
+	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
+}
+
+int
+nouveau_mem_init_agp(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_agp_info info;
+	struct drm_agp_mode mode;
+	int ret;
+
+	if (nouveau_noagp)
+		return 0;
+
+	nouveau_mem_reset_agp(dev);
+
+	if (!dev->agp->acquired) {
+		ret = drm_agp_acquire(dev);
+		if (ret) {
+			NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret);
+			return ret;
+		}
+	}
+
+	ret = drm_agp_info(dev, &info);
+	if (ret) {
+		NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
+		return ret;
+	}
+
+	/* see agp.h for the AGPSTAT_* modes available */
+	mode.mode = info.mode;
+	ret = drm_agp_enable(dev, mode);
+	if (ret) {
+		NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
+		return ret;
+	}
+
+	dev_priv->gart_info.type	= NOUVEAU_GART_AGP;
+	dev_priv->gart_info.aper_base	= info.aperture_base;
+	dev_priv->gart_info.aper_size	= info.aperture_size;
+	return 0;
+}
+
+int
+nouveau_mem_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+	int ret, dma_bits = 32;
+
+	dev_priv->fb_phys = drm_get_resource_start(dev, 1);
+	dev_priv->gart_info.type = NOUVEAU_GART_NONE;
+
+	if (dev_priv->card_type >= NV_50 &&
+	    pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
+		dma_bits = 40;
+
+	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
+	if (ret) {
+		NV_ERROR(dev, "Error setting DMA mask: %d\n", ret);
+		return ret;
+	}
+
+	ret = nouveau_ttm_global_init(dev_priv);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
+				 dev_priv->ttm.bo_global_ref.ref.object,
+				 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
+				 dma_bits <= 32);
+	if (ret) {
+		NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
+		return ret;
+	}
+
+	INIT_LIST_HEAD(&dev_priv->ttm.bo_list);
+	spin_lock_init(&dev_priv->ttm.bo_list_lock);
+
+	dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
+
+	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
+	if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1))
+		dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1);
+	dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
+
+	NV_INFO(dev, "%d MiB VRAM\n", (int)(dev_priv->fb_available_size >> 20));
+
+	/* remove reserved space at end of vram from available amount */
+	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
+	dev_priv->fb_aper_free = dev_priv->fb_available_size;
+
+	/* mappable vram */
+	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+			     dev_priv->fb_available_size >> PAGE_SHIFT);
+	if (ret) {
+		NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
+		return ret;
+	}
+
+	/* GART */
+#if !defined(__powerpc__) && !defined(__ia64__)
+	if (drm_device_is_agp(dev) && dev->agp) {
+		ret = nouveau_mem_init_agp(dev);
+		if (ret)
+			NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
+	}
+#endif
+
+	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
+		ret = nouveau_sgdma_init(dev);
+		if (ret) {
+			NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
+			return ret;
+		}
+	}
+
+	NV_INFO(dev, "%d MiB GART (aperture)\n",
+		(int)(dev_priv->gart_info.aper_size >> 20));
+	dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;
+
+	ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
+			     dev_priv->gart_info.aper_size >> PAGE_SHIFT);
+	if (ret) {
+		NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
+		return ret;
+	}
+
+	dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
+					 drm_get_resource_len(dev, 1),
+					 DRM_MTRR_WC);
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
new file mode 100644
index 0000000..6c66a34
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+int
+nouveau_notifier_init_channel(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct nouveau_bo *ntfy = NULL;
+	int ret;
+
+	ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, nouveau_vram_notify ?
+			      TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT,
+			      0, 0x0000, false, true, &ntfy);
+	if (ret)
+		return ret;
+
+	ret = nouveau_bo_pin(ntfy, TTM_PL_FLAG_VRAM);
+	if (ret)
+		goto out_err;
+
+	ret = nouveau_bo_map(ntfy);
+	if (ret)
+		goto out_err;
+
+	ret = nouveau_mem_init_heap(&chan->notifier_heap, 0, ntfy->bo.mem.size);
+	if (ret)
+		goto out_err;
+
+	chan->notifier_bo = ntfy;
+out_err:
+	if (ret) {
+		mutex_lock(&dev->struct_mutex);
+		drm_gem_object_unreference(ntfy->gem);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	return ret;
+}
+
+void
+nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+
+	if (!chan->notifier_bo)
+		return;
+
+	nouveau_bo_unmap(chan->notifier_bo);
+	mutex_lock(&dev->struct_mutex);
+	nouveau_bo_unpin(chan->notifier_bo);
+	drm_gem_object_unreference(chan->notifier_bo->gem);
+	mutex_unlock(&dev->struct_mutex);
+	nouveau_mem_takedown(&chan->notifier_heap);
+}
+
+static void
+nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
+			     struct nouveau_gpuobj *gpuobj)
+{
+	NV_DEBUG(dev, "\n");
+
+	if (gpuobj->priv)
+		nouveau_mem_free_block(gpuobj->priv);
+}
+
+int
+nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
+		       int size, uint32_t *b_offset)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *nobj = NULL;
+	struct mem_block *mem;
+	uint32_t offset;
+	int target, ret;
+
+	if (!chan->notifier_heap) {
+		NV_ERROR(dev, "Channel %d doesn't have a notifier heap!\n",
+			 chan->id);
+		return -EINVAL;
+	}
+
+	mem = nouveau_mem_alloc_block(chan->notifier_heap, size, 0,
+				      (struct drm_file *)-2, 0);
+	if (!mem) {
+		NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
+		return -ENOMEM;
+	}
+
+	offset = chan->notifier_bo->bo.mem.mm_node->start << PAGE_SHIFT;
+	if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) {
+		target = NV_DMA_TARGET_VIDMEM;
+	} else
+	if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_TT) {
+		if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA &&
+		    dev_priv->card_type < NV_50) {
+			ret = nouveau_sgdma_get_page(dev, offset, &offset);
+			if (ret)
+				return ret;
+			target = NV_DMA_TARGET_PCI;
+		} else {
+			target = NV_DMA_TARGET_AGP;
+		}
+	} else {
+		NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
+			 chan->notifier_bo->bo.mem.mem_type);
+		nouveau_mem_free_block(mem);
+		return -EINVAL;
+	}
+	offset += mem->start;
+
+	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
+				     mem->size, NV_DMA_ACCESS_RW, target,
+				     &nobj);
+	if (ret) {
+		nouveau_mem_free_block(mem);
+		NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret);
+		return ret;
+	}
+	nobj->dtor   = nouveau_notifier_gpuobj_dtor;
+	nobj->priv   = mem;
+
+	ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL);
+	if (ret) {
+		nouveau_gpuobj_del(dev, &nobj);
+		nouveau_mem_free_block(mem);
+		NV_ERROR(dev, "Error referencing notifier ctxdma: %d\n", ret);
+		return ret;
+	}
+
+	*b_offset = mem->start;
+	return 0;
+}
+
+int
+nouveau_notifier_offset(struct nouveau_gpuobj *nobj, uint32_t *poffset)
+{
+	if (!nobj || nobj->dtor != nouveau_notifier_gpuobj_dtor)
+		return -EINVAL;
+
+	if (poffset) {
+		struct mem_block *mem = nobj->priv;
+
+		if (*poffset >= mem->size)
+			return -EINVAL;
+
+		*poffset += mem->start;
+	}
+
+	return 0;
+}
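+
+/* Hedged example (illustrative, not part of the original file): for a
+ * notifier allocated at mem->start 0x20 with mem->size 0x20, passing
+ * *poffset = 0x10 rewrites it to the absolute offset 0x30, while
+ * *poffset = 0x20 is out of range and fails with -EINVAL.
+ */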
+
+int
+nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
+{
+	struct drm_nouveau_notifierobj_alloc *na = data;
+	struct nouveau_channel *chan;
+	int ret;
+
+	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan);
+
+	ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
+	if (ret)
+		return ret;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
new file mode 100644
index 0000000..93379bb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -0,0 +1,1294 @@
+/*
+ * Copyright (C) 2006 Ben Skeggs.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ *   Ben Skeggs <darktama@iinet.net.au>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+/* NVidia uses context objects to drive drawing operations.
+
+   Context objects can be selected into 8 subchannels in the FIFO,
+   and then used via DMA command buffers.
+
+   A context object is referenced by a user-defined handle (CARD32). The HW
+   looks up graphics objects in a hash table in the instance RAM.
+
+   An entry in the hash table consists of 2 CARD32s. The first CARD32 holds
+   the handle; the second is a bitfield that contains the address of the
+   object in instance RAM.
+
+   The format of the second CARD32 seems to be:
+
+   NV4 to NV30:
+
+   15: 0  instance_addr >> 4
+   17:16  engine (here uses 1 = graphics)
+   28:24  channel id (here uses 0)
+   31	  valid (use 1)
+
+   NV40:
+
+   15: 0  instance_addr >> 4   (maybe 19-0)
+   21:20  engine (here uses 1 = graphics)
+   I'm unsure about the other bits, but using 0 seems to work.
+
+   The key into the hash table depends on the object handle and channel id and
+   is given as:
+*/
+static uint32_t
+nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t hash = 0;
+	int i;
+
+	NV_DEBUG(dev, "ch%d handle=0x%08x\n", channel, handle);
+
+	for (i = 32; i > 0; i -= dev_priv->ramht_bits) {
+		hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1));
+		handle >>= dev_priv->ramht_bits;
+	}
+
+	if (dev_priv->card_type < NV_50)
+		hash ^= channel << (dev_priv->ramht_bits - 4);
+	hash <<= 3;
+
+	NV_DEBUG(dev, "hash=0x%08x\n", hash);
+	return hash;
+}
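+
+/* Hedged worked example (illustrative, not part of the original file):
+ * with dev_priv->ramht_bits == 9 (a 4KiB RAMHT of 512 8-byte entries),
+ * handle 0x00000012 on channel 0 folds to 0x12, since every chunk above
+ * the low 9 bits is zero; no channel bits are mixed in, and the final
+ * << 3 turns the entry index into the byte offset 0x12 << 3 = 0x90 that
+ * the probe loops below start from.
+ */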
+
+static int
+nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
+			  uint32_t offset)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t ctx = nv_ro32(dev, ramht, (offset + 4)/4);
+
+	if (dev_priv->card_type < NV_40)
+		return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
+	return (ctx != 0);
+}
+
+static int
+nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
+	struct nouveau_channel *chan = ref->channel;
+	struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
+	uint32_t ctx, co, ho;
+
+	if (!ramht) {
+		NV_ERROR(dev, "No hash table!\n");
+		return -EINVAL;
+	}
+
+	if (dev_priv->card_type < NV_40) {
+		ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) |
+		      (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
+		      (ref->gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
+	} else
+	if (dev_priv->card_type < NV_50) {
+		ctx = (ref->instance >> 4) |
+		      (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
+		      (ref->gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
+	} else {
+		if (ref->gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
+			ctx = (ref->instance << 10) | 2;
+		} else {
+			ctx = (ref->instance >> 4) |
+			      ((ref->gpuobj->engine <<
+				NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
+		}
+	}
+
+	instmem->prepare_access(dev, true);
+	co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
+	do {
+		if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
+			NV_DEBUG(dev,
+				 "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
+				 chan->id, co, ref->handle, ctx);
+			nv_wo32(dev, ramht, (co + 0)/4, ref->handle);
+			nv_wo32(dev, ramht, (co + 4)/4, ctx);
+
+			list_add_tail(&ref->list, &chan->ramht_refs);
+			instmem->finish_access(dev);
+			return 0;
+		}
+		NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
+			 chan->id, co, nv_ro32(dev, ramht, co/4));
+
+		co += 8;
+		if (co >= dev_priv->ramht_size)
+			co = 0;
+	} while (co != ho);
+	instmem->finish_access(dev);
+
+	NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
+	return -ENOMEM;
+}
+
+static void
+nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
+	struct nouveau_channel *chan = ref->channel;
+	struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
+	uint32_t co, ho;
+
+	if (!ramht) {
+		NV_ERROR(dev, "No hash table!\n");
+		return;
+	}
+
+	instmem->prepare_access(dev, true);
+	co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
+	do {
+		if (nouveau_ramht_entry_valid(dev, ramht, co) &&
+		    (ref->handle == nv_ro32(dev, ramht, (co/4)))) {
+			NV_DEBUG(dev,
+				 "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
+				 chan->id, co, ref->handle,
+				 nv_ro32(dev, ramht, (co + 4)/4));
+			nv_wo32(dev, ramht, (co + 0)/4, 0x00000000);
+			nv_wo32(dev, ramht, (co + 4)/4, 0x00000000);
+
+			list_del(&ref->list);
+			instmem->finish_access(dev);
+			return;
+		}
+
+		co += 8;
+		if (co >= dev_priv->ramht_size)
+			co = 0;
+	} while (co != ho);
+	list_del(&ref->list);
+	instmem->finish_access(dev);
+
+	NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
+		 chan->id, ref->handle);
+}
+
+int
+nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
+		   uint32_t size, int align, uint32_t flags,
+		   struct nouveau_gpuobj **gpuobj_ret)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_engine *engine = &dev_priv->engine;
+	struct nouveau_gpuobj *gpuobj;
+	struct mem_block *pramin = NULL;
+	int ret;
+
+	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
+		 chan ? chan->id : -1, size, align, flags);
+
+	if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
+		return -EINVAL;
+
+	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
+	if (!gpuobj)
+		return -ENOMEM;
+	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
+	gpuobj->flags = flags;
+	gpuobj->im_channel = chan;
+
+	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+
+	/* Choose between global instmem heap, and per-channel private
+	 * instmem heap.  On <NV50 allow requests for private instmem
+	 * to be satisfied from global heap if no per-channel area
+	 * available.
+	 */
+	if (chan) {
+		if (chan->ramin_heap) {
+			NV_DEBUG(dev, "private heap\n");
+			pramin = chan->ramin_heap;
+		} else
+		if (dev_priv->card_type < NV_50) {
+			NV_DEBUG(dev, "global heap fallback\n");
+			pramin = dev_priv->ramin_heap;
+		}
+	} else {
+		NV_DEBUG(dev, "global heap\n");
+		pramin = dev_priv->ramin_heap;
+	}
+
+	if (!pramin) {
+		NV_ERROR(dev, "No PRAMIN heap!\n");
+		nouveau_gpuobj_del(dev, &gpuobj);
+		return -EINVAL;
+	}
+
+	if (!chan) {
+		ret = engine->instmem.populate(dev, gpuobj, &size);
+		if (ret) {
+			nouveau_gpuobj_del(dev, &gpuobj);
+			return ret;
+		}
+	}
+
+	/* Allocate a chunk of the PRAMIN aperture */
+	gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size,
+						    drm_order(align),
+						    (struct drm_file *)-2, 0);
+	if (!gpuobj->im_pramin) {
+		nouveau_gpuobj_del(dev, &gpuobj);
+		return -ENOMEM;
+	}
+
+	if (!chan) {
+		ret = engine->instmem.bind(dev, gpuobj);
+		if (ret) {
+			nouveau_gpuobj_del(dev, &gpuobj);
+			return ret;
+		}
+	}
+
+	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
+		int i;
+
+		engine->instmem.prepare_access(dev, true);
+		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
+			nv_wo32(dev, gpuobj, i/4, 0);
+		engine->instmem.finish_access(dev);
+	}
+
+	*gpuobj_ret = gpuobj;
+	return 0;
+}
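+
+/* Hedged usage sketch (illustrative, not part of the original file): a
+ * 16-byte, zeroed object suitable for a pre-NV50 ctxdma could be
+ * allocated as
+ *
+ *	struct nouveau_gpuobj *obj = NULL;
+ *	int ret = nouveau_gpuobj_new(dev, chan, 16, 16,
+ *				     NVOBJ_FLAG_ZERO_ALLOC |
+ *				     NVOBJ_FLAG_ZERO_FREE, &obj);
+ *
+ * which is what nouveau_gpuobj_dma_new() below does, with a
+ * class-dependent size.
+ */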
+
+int
+nouveau_gpuobj_early_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	NV_DEBUG(dev, "\n");
+
+	INIT_LIST_HEAD(&dev_priv->gpuobj_list);
+
+	return 0;
+}
+
+int
+nouveau_gpuobj_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret;
+
+	NV_DEBUG(dev, "\n");
+
+	if (dev_priv->card_type < NV_50) {
+		ret = nouveau_gpuobj_new_fake(dev,
+			dev_priv->ramht_offset, ~0, dev_priv->ramht_size,
+			NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS,
+			&dev_priv->ramht, NULL);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+void
+nouveau_gpuobj_takedown(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	NV_DEBUG(dev, "\n");
+
+	nouveau_gpuobj_del(dev, &dev_priv->ramht);
+}
+
+void
+nouveau_gpuobj_late_takedown(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *gpuobj = NULL;
+	struct list_head *entry, *tmp;
+
+	NV_DEBUG(dev, "\n");
+
+	list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
+		gpuobj = list_entry(entry, struct nouveau_gpuobj, list);
+
+		NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n",
+			 gpuobj, gpuobj->refcount);
+		gpuobj->refcount = 0;
+		nouveau_gpuobj_del(dev, &gpuobj);
+	}
+}
+
+int
+nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_engine *engine = &dev_priv->engine;
+	struct nouveau_gpuobj *gpuobj;
+	int i;
+
+	NV_DEBUG(dev, "gpuobj %p\n", pgpuobj ? *pgpuobj : NULL);
+
+	if (!dev_priv || !pgpuobj || !(*pgpuobj))
+		return -EINVAL;
+	gpuobj = *pgpuobj;
+
+	if (gpuobj->refcount != 0) {
+		NV_ERROR(dev, "gpuobj refcount is %d\n", gpuobj->refcount);
+		return -EINVAL;
+	}
+
+	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
+		engine->instmem.prepare_access(dev, true);
+		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
+			nv_wo32(dev, gpuobj, i/4, 0);
+		engine->instmem.finish_access(dev);
+	}
+
+	if (gpuobj->dtor)
+		gpuobj->dtor(dev, gpuobj);
+
+	if (gpuobj->im_backing && !(gpuobj->flags & NVOBJ_FLAG_FAKE))
+		engine->instmem.clear(dev, gpuobj);
+
+	if (gpuobj->im_pramin) {
+		if (gpuobj->flags & NVOBJ_FLAG_FAKE)
+			kfree(gpuobj->im_pramin);
+		else
+			nouveau_mem_free_block(gpuobj->im_pramin);
+	}
+
+	list_del(&gpuobj->list);
+
+	*pgpuobj = NULL;
+	kfree(gpuobj);
+	return 0;
+}
+
+static int
+nouveau_gpuobj_instance_get(struct drm_device *dev,
+			    struct nouveau_channel *chan,
+			    struct nouveau_gpuobj *gpuobj, uint32_t *inst)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *cpramin;
+
+	/* <NV50 use PRAMIN address everywhere */
+	if (dev_priv->card_type < NV_50) {
+		*inst = gpuobj->im_pramin->start;
+		return 0;
+	}
+
+	if (chan && gpuobj->im_channel != chan) {
+		NV_ERROR(dev, "Channel mismatch: obj %d, ref %d\n",
+			 gpuobj->im_channel->id, chan->id);
+		return -EINVAL;
+	}
+
+	/* NV50 channel-local instance */
+	if (chan) {
+		cpramin = chan->ramin->gpuobj;
+		*inst = gpuobj->im_pramin->start - cpramin->im_pramin->start;
+		return 0;
+	}
+
+	/* NV50 global (VRAM) instance */
+	if (!gpuobj->im_channel) {
+		/* ...from global heap */
+		if (!gpuobj->im_backing) {
+			NV_ERROR(dev, "no VRAM backing for gpuobj\n");
+			return -EINVAL;
+		}
+		*inst = gpuobj->im_backing_start;
+		return 0;
+	}
+
+	/* ...from local heap */
+	cpramin = gpuobj->im_channel->ramin->gpuobj;
+	*inst = cpramin->im_backing_start +
+		(gpuobj->im_pramin->start - cpramin->im_pramin->start);
+	return 0;
+}
+
+int
+nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan,
+		       uint32_t handle, struct nouveau_gpuobj *gpuobj,
+		       struct nouveau_gpuobj_ref **ref_ret)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj_ref *ref;
+	uint32_t instance;
+	int ret;
+
+	NV_DEBUG(dev, "ch%d h=0x%08x gpuobj=%p\n",
+		 chan ? chan->id : -1, handle, gpuobj);
+
+	if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL))
+		return -EINVAL;
+
+	if (!chan && !ref_ret)
+		return -EINVAL;
+
+	if (gpuobj->engine == NVOBJ_ENGINE_SW && !gpuobj->im_pramin) {
+		/* sw object */
+		instance = 0x40;
+	} else {
+		ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance);
+		if (ret)
+			return ret;
+	}
+
+	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+	if (!ref)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&ref->list);
+	ref->gpuobj   = gpuobj;
+	ref->channel  = chan;
+	ref->instance = instance;
+
+	if (!ref_ret) {
+		ref->handle = handle;
+
+		ret = nouveau_ramht_insert(dev, ref);
+		if (ret) {
+			kfree(ref);
+			return ret;
+		}
+	} else {
+		ref->handle = ~0;
+		*ref_ret = ref;
+	}
+
+	ref->gpuobj->refcount++;
+	return 0;
+}
+
+int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref)
+{
+	struct nouveau_gpuobj_ref *ref;
+
+	NV_DEBUG(dev, "ref %p\n", pref ? *pref : NULL);
+
+	if (!dev || !pref || *pref == NULL)
+		return -EINVAL;
+	ref = *pref;
+
+	if (ref->handle != ~0)
+		nouveau_ramht_remove(dev, ref);
+
+	if (ref->gpuobj) {
+		ref->gpuobj->refcount--;
+
+		if (ref->gpuobj->refcount == 0) {
+			if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS))
+				nouveau_gpuobj_del(dev, &ref->gpuobj);
+		}
+	}
+
+	*pref = NULL;
+	kfree(ref);
+	return 0;
+}
+
+int
+nouveau_gpuobj_new_ref(struct drm_device *dev,
+		       struct nouveau_channel *oc, struct nouveau_channel *rc,
+		       uint32_t handle, uint32_t size, int align,
+		       uint32_t flags, struct nouveau_gpuobj_ref **ref)
+{
+	struct nouveau_gpuobj *gpuobj = NULL;
+	int ret;
+
+	ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref);
+	if (ret) {
+		nouveau_gpuobj_del(dev, &gpuobj);
+		return ret;
+	}
+
+	return 0;
+}
+
+int
+nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle,
+			struct nouveau_gpuobj_ref **ref_ret)
+{
+	struct nouveau_gpuobj_ref *ref;
+	struct list_head *entry, *tmp;
+
+	list_for_each_safe(entry, tmp, &chan->ramht_refs) {
+		ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
+
+		if (ref->handle == handle) {
+			if (ref_ret)
+				*ref_ret = ref;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+int
+nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
+			uint32_t b_offset, uint32_t size,
+			uint32_t flags, struct nouveau_gpuobj **pgpuobj,
+			struct nouveau_gpuobj_ref **pref)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *gpuobj = NULL;
+	int i;
+
+	NV_DEBUG(dev,
+		 "p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n",
+		 p_offset, b_offset, size, flags);
+
+	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
+	if (!gpuobj)
+		return -ENOMEM;
+	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
+	gpuobj->im_channel = NULL;
+	gpuobj->flags      = flags | NVOBJ_FLAG_FAKE;
+
+	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+
+	if (p_offset != ~0) {
+		gpuobj->im_pramin = kzalloc(sizeof(struct mem_block),
+					    GFP_KERNEL);
+		if (!gpuobj->im_pramin) {
+			nouveau_gpuobj_del(dev, &gpuobj);
+			return -ENOMEM;
+		}
+		gpuobj->im_pramin->start = p_offset;
+		gpuobj->im_pramin->size  = size;
+	}
+
+	if (b_offset != ~0) {
+		gpuobj->im_backing = (struct nouveau_bo *)-1;
+		gpuobj->im_backing_start = b_offset;
+	}
+
+	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC)) {
+		dev_priv->engine.instmem.prepare_access(dev, true);
+		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
+			nv_wo32(dev, gpuobj, i/4, 0);
+		dev_priv->engine.instmem.finish_access(dev);
+	}
+
+	if (pref) {
+		i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref);
+		if (i) {
+			nouveau_gpuobj_del(dev, &gpuobj);
+			return i;
+		}
+	}
+
+	if (pgpuobj)
+		*pgpuobj = gpuobj;
+	return 0;
+}
+
+static uint32_t
+nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	/*XXX: dodgy hack for now */
+	if (dev_priv->card_type >= NV_50)
+		return 24;
+	if (dev_priv->card_type >= NV_40)
+		return 32;
+	return 16;
+}
+
+/*
+   DMA objects are used to reference a piece of memory in the
+   framebuffer, PCI or AGP address space. Each object is 16 bytes long
+   and looks as follows:
+
+   entry[0]
+   11:0  class (seems like I can always use 0 here)
+   12    page table present?
+   13    page entry linear?
+   15:14 access: 0 rw, 1 ro, 2 wo
+   17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
+   31:20 dma adjust (bits 0-11 of the address)
+   entry[1]
+   dma limit (size of transfer)
+   entry[X]
+   1     0 readonly, 1 readwrite
+   31:12 dma frame address of the page (bits 12-31 of the address)
+   entry[N]
+   page table terminator; nvidia repeats the value of the first pte here,
+   rivatv uses 0xffffffff
+
+   Non-linear page tables need a list of frame addresses afterwards;
+   the rivatv project has some info on this.
+
+   The method below creates a DMA object in instance RAM and returns a handle
+   to it that can be used to set up context objects.
+*/
+int
+nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
+		       uint64_t offset, uint64_t size, int access,
+		       int target, struct nouveau_gpuobj **gpuobj)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
+	int ret;
+
+	NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
+		 chan->id, class, offset, size);
+	NV_DEBUG(dev, "access=%d target=%d\n", access, target);
+
+	switch (target) {
+	case NV_DMA_TARGET_AGP:
+		offset += dev_priv->gart_info.aper_base;
+		break;
+	default:
+		break;
+	}
+
+	ret = nouveau_gpuobj_new(dev, chan,
+				 nouveau_gpuobj_class_instmem_size(dev, class),
+				 16, NVOBJ_FLAG_ZERO_ALLOC |
+				 NVOBJ_FLAG_ZERO_FREE, gpuobj);
+	if (ret) {
+		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
+		return ret;
+	}
+
+	instmem->prepare_access(dev, true);
+
+	if (dev_priv->card_type < NV_50) {
+		uint32_t frame, adjust, pte_flags = 0;
+
+		if (access != NV_DMA_ACCESS_RO)
+			pte_flags |= (1<<1);
+		adjust = offset &  0x00000fff;
+		frame  = offset & ~0x00000fff;
+
+		nv_wo32(dev, *gpuobj, 0, ((1<<12) | (1<<13) |
+					  (adjust << 20) |
+					  (access << 14) |
+					  (target << 16) |
+					  class));
+		nv_wo32(dev, *gpuobj, 1, size - 1);
+		nv_wo32(dev, *gpuobj, 2, frame | pte_flags);
+		nv_wo32(dev, *gpuobj, 3, frame | pte_flags);
+	} else {
+		uint64_t limit = offset + size - 1;
+		uint32_t flags0, flags5;
+
+		if (target == NV_DMA_TARGET_VIDMEM) {
+			flags0 = 0x00190000;
+			flags5 = 0x00010000;
+		} else {
+			flags0 = 0x7fc00000;
+			flags5 = 0x00080000;
+		}
+
+		nv_wo32(dev, *gpuobj, 0, flags0 | class);
+		nv_wo32(dev, *gpuobj, 1, lower_32_bits(limit));
+		nv_wo32(dev, *gpuobj, 2, lower_32_bits(offset));
+		nv_wo32(dev, *gpuobj, 3, ((upper_32_bits(limit) & 0xff) << 24) |
+					(upper_32_bits(offset) & 0xff));
+		nv_wo32(dev, *gpuobj, 5, flags5);
+	}
+
+	instmem->finish_access(dev);
+
+	(*gpuobj)->engine = NVOBJ_ENGINE_SW;
+	(*gpuobj)->class  = class;
+	return 0;
+}
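+
+/* Hedged sanity check of the pre-NV50 encoding above (illustrative, not
+ * part of the original file): an RW ctxdma of class 0x3d targeting PCI
+ * at offset 0x1004 gives adjust = 0x004 and frame = 0x1000, so
+ *   entry[0] = (1<<12) | (1<<13) | (0x004<<20) | (0<<14) | (2<<16) | 0x3d
+ *            = 0x0042303d
+ * and entry[2]/entry[3] both hold frame | pte_flags = 0x1000 | 2 = 0x1002.
+ */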
+
+int
+nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
+			    uint64_t offset, uint64_t size, int access,
+			    struct nouveau_gpuobj **gpuobj,
+			    uint32_t *o_ret)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
+	    (dev_priv->card_type >= NV_50 &&
+	     dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
+		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+					     offset + dev_priv->vm_gart_base,
+					     size, access, NV_DMA_TARGET_AGP,
+					     gpuobj);
+		if (o_ret)
+			*o_ret = 0;
+	} else
+	if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
+		*gpuobj = dev_priv->gart_info.sg_ctxdma;
+		if (offset & ~0xffffffffULL) {
+			NV_ERROR(dev, "obj offset exceeds 32-bits\n");
+			return -EINVAL;
+		}
+		if (o_ret)
+			*o_ret = (uint32_t)offset;
+		ret = (*gpuobj != NULL) ? 0 : -EINVAL;
+	} else {
+		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+/* Context objects in the instance RAM have the following structure.
+ * On NV40 they are 32 bytes long; on NV30 and earlier, 16 bytes.
+
+   NV4 - NV30:
+
+   entry[0]
+   11:0 class
+   12   chroma key enable
+   13   user clip enable
+   14   swizzle enable
+   17:15 patch config:
+       scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
+   18   synchronize enable
+   19   endian: 1 big, 0 little
+   21:20 dither mode
+   23    single step enable
+   24    patch status: 0 invalid, 1 valid
+   25    context_surface 0: 1 valid
+   26    context surface 1: 1 valid
+   27    context pattern: 1 valid
+   28    context rop: 1 valid
+   29,30 context beta, beta4
+   entry[1]
+   7:0   mono format
+   15:8  color format
+   31:16 notify instance address
+   entry[2]
+   15:0  dma 0 instance address
+   31:16 dma 1 instance address
+   entry[3]
+   dma method traps
+
+   NV40:
+   No idea what the exact format is. Here's what can be deduced:
+
+   entry[0]:
+   11:0  class  (maybe uses more bits here?)
+   17    user clip enable
+   21:19 patch config
+   25    patch status valid ?
+   entry[1]:
+   15:0  DMA notifier  (maybe 20:0)
+   entry[2]:
+   15:0  DMA 0 instance (maybe 20:0)
+   24    big endian
+   entry[3]:
+   15:0  DMA 1 instance (maybe 20:0)
+   entry[4]:
+   entry[5]:
+   set to 0?
+*/
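+/* Hedged reading of the NV04 layout above (illustrative, not part of
+ * the original file): for a little-endian pre-NV40 object of a normal
+ * class, nouveau_gpuobj_gr_new() below only writes the class into
+ * entry[0] and leaves the patch/notifier/DMA fields zeroed for the
+ * context switching code to fill in.
+ */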
+int
+nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
+		      struct nouveau_gpuobj **gpuobj)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret;
+
+	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);
+
+	ret = nouveau_gpuobj_new(dev, chan,
+				 nouveau_gpuobj_class_instmem_size(dev, class),
+				 16,
+				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
+				 gpuobj);
+	if (ret) {
+		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
+		return ret;
+	}
+
+	dev_priv->engine.instmem.prepare_access(dev, true);
+	if (dev_priv->card_type >= NV_50) {
+		nv_wo32(dev, *gpuobj, 0, class);
+		nv_wo32(dev, *gpuobj, 5, 0x00010000);
+	} else {
+		switch (class) {
+		case NV_CLASS_NULL:
+			nv_wo32(dev, *gpuobj, 0, 0x00001030);
+			nv_wo32(dev, *gpuobj, 1, 0xFFFFFFFF);
+			break;
+		default:
+			if (dev_priv->card_type >= NV_40) {
+				nv_wo32(dev, *gpuobj, 0, class);
+#ifdef __BIG_ENDIAN
+				nv_wo32(dev, *gpuobj, 2, 0x01000000);
+#endif
+			} else {
+#ifdef __BIG_ENDIAN
+				nv_wo32(dev, *gpuobj, 0, class | 0x00080000);
+#else
+				nv_wo32(dev, *gpuobj, 0, class);
+#endif
+			}
+		}
+	}
+	dev_priv->engine.instmem.finish_access(dev);
+
+	(*gpuobj)->engine = NVOBJ_ENGINE_GR;
+	(*gpuobj)->class  = class;
+	return 0;
+}
+
+static int
+nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
+		      struct nouveau_gpuobj **gpuobj_ret)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct nouveau_gpuobj *gpuobj;
+
+	if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
+		return -EINVAL;
+
+	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
+	if (!gpuobj)
+		return -ENOMEM;
+	gpuobj->engine = NVOBJ_ENGINE_SW;
+	gpuobj->class = class;
+
+	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+	*gpuobj_ret = gpuobj;
+	return 0;
+}
+
+static int
+nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *pramin = NULL;
+	uint32_t size;
+	uint32_t base;
+	int ret;
+
+	NV_DEBUG(dev, "ch%d\n", chan->id);
+
+	/* Base amount for object storage (4KiB enough?) */
+	size = 0x1000;
+	base = 0;
+
+	/* PGRAPH context */
+
+	if (dev_priv->card_type == NV_50) {
+		/* Various fixed table thingos */
+		size += 0x1400; /* mostly unknown stuff */
+		size += 0x4000; /* vm pd */
+		base  = 0x6000;
+		/* RAMHT, not sure about setting size yet, 32KiB to be safe */
+		size += 0x8000;
+		/* RAMFC */
+		size += 0x1000;
+		/* PGRAPH context */
+		size += 0x70000;
+	}
+
+	NV_DEBUG(dev, "ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n",
+		 chan->id, size, base);
+	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0,
+				     &chan->ramin);
+	if (ret) {
+		NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
+		return ret;
+	}
+	pramin = chan->ramin->gpuobj;
+
+	ret = nouveau_mem_init_heap(&chan->ramin_heap,
+				    pramin->im_pramin->start + base, size);
+	if (ret) {
+		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
+		nouveau_gpuobj_ref_del(dev, &chan->ramin);
+		return ret;
+	}
+
+	return 0;
+}
+
+int
+nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
+			    uint32_t vram_h, uint32_t tt_h)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
+	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
+	int ret, i;
+
+	INIT_LIST_HEAD(&chan->ramht_refs);
+
+	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
+
+	/* Reserve a block of PRAMIN for the channel
+	 *XXX: maybe on <NV50 too at some point
+	 */
+	if (0 || dev_priv->card_type == NV_50) {
+		ret = nouveau_gpuobj_channel_init_pramin(chan);
+		if (ret) {
+			NV_ERROR(dev, "init pramin\n");
+			return ret;
+		}
+	}
+
+	/* NV50 VM
+	 *  - Allocate per-channel page-directory
+	 *  - Map GART and VRAM into the channel's address space at the
+	 *    locations determined during init.
+	 */
+	if (dev_priv->card_type >= NV_50) {
+		uint32_t vm_offset, pde;
+
+		instmem->prepare_access(dev, true);
+
+		vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
+		vm_offset += chan->ramin->gpuobj->im_pramin->start;
+
+		ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
+					      0, &chan->vm_pd, NULL);
+		if (ret) {
+			instmem->finish_access(dev);
+			return ret;
+		}
+		for (i = 0; i < 0x4000; i += 8) {
+			nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000);
+			nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe);
+		}
+
+		pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 2;
+		ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
+					     dev_priv->gart_info.sg_ctxdma,
+					     &chan->vm_gart_pt);
+		if (ret) {
+			instmem->finish_access(dev);
+			return ret;
+		}
+		nv_wo32(dev, chan->vm_pd, pde++,
+			    chan->vm_gart_pt->instance | 0x03);
+		nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
+
+		pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 2;
+		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
+			ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
+						     dev_priv->vm_vram_pt[i],
+						     &chan->vm_vram_pt[i]);
+			if (ret) {
+				instmem->finish_access(dev);
+				return ret;
+			}
+
+			nv_wo32(dev, chan->vm_pd, pde++,
+				    chan->vm_vram_pt[i]->instance | 0x61);
+			nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
+		}
+
+		instmem->finish_access(dev);
+	}
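+
+	/* Hedged note on the page-directory math above (illustrative
+	 * numbers): each PDE is two 32-bit words covering 512MiB, so a
+	 * vm_gart_base of 0x20000000 (512MiB) lands at word index
+	 * (0x20000000 / (512*1024*1024)) * 2 = 2, i.e. the second
+	 * 8-byte slot of the 0x4000-byte vm_pd.
+	 */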
+
+	/* RAMHT */
+	if (dev_priv->card_type < NV_50) {
+		ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht,
+					     &chan->ramht);
+		if (ret)
+			return ret;
+	} else {
+		ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0,
+					     0x8000, 16,
+					     NVOBJ_FLAG_ZERO_ALLOC,
+					     &chan->ramht);
+		if (ret)
+			return ret;
+	}
+
+	/* VRAM ctxdma */
+	if (dev_priv->card_type >= NV_50) {
+		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+					     0, dev_priv->vm_end,
+					     NV_DMA_ACCESS_RW,
+					     NV_DMA_TARGET_AGP, &vram);
+		if (ret) {
+			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
+			return ret;
+		}
+	} else {
+		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+						0, dev_priv->fb_available_size,
+						NV_DMA_ACCESS_RW,
+						NV_DMA_TARGET_VIDMEM, &vram);
+		if (ret) {
+			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
+			return ret;
+		}
+	}
+
+	ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL);
+	if (ret) {
+		NV_ERROR(dev, "Error referencing VRAM ctxdma: %d\n", ret);
+		return ret;
+	}
+
+	/* TT memory ctxdma */
+	if (dev_priv->card_type >= NV_50) {
+		tt = vram;
+	} else
+	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
+		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
+						  dev_priv->gart_info.aper_size,
+						  NV_DMA_ACCESS_RW, &tt, NULL);
+	} else {
+		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
+		ret = -EINVAL;
+	}
+
+	if (ret) {
+		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
+		return ret;
+	}
+
+	ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL);
+	if (ret) {
+		NV_ERROR(dev, "Error referencing TT ctxdma: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+void
+nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct drm_device *dev = chan->dev;
+	struct list_head *entry, *tmp;
+	struct nouveau_gpuobj_ref *ref;
+	int i;
+
+	NV_DEBUG(dev, "ch%d\n", chan->id);
+
+	if (!chan->ramht_refs.next)
+		return;
+
+	list_for_each_safe(entry, tmp, &chan->ramht_refs) {
+		ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
+
+		nouveau_gpuobj_ref_del(dev, &ref);
+	}
+
+	nouveau_gpuobj_ref_del(dev, &chan->ramht);
+
+	nouveau_gpuobj_del(dev, &chan->vm_pd);
+	nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt);
+	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
+		nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);
+
+	if (chan->ramin_heap)
+		nouveau_mem_takedown(&chan->ramin_heap);
+	if (chan->ramin)
+		nouveau_gpuobj_ref_del(dev, &chan->ramin);
+}
+
+int
+nouveau_gpuobj_suspend(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *gpuobj;
+	int i;
+
+	if (dev_priv->card_type < NV_50) {
+		dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram);
+		if (!dev_priv->susres.ramin_copy)
+			return -ENOMEM;
+
+		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
+			dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i);
+		return 0;
+	}
+
+	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
+		if (!gpuobj->im_backing || (gpuobj->flags & NVOBJ_FLAG_FAKE))
+			continue;
+
+		gpuobj->im_backing_suspend = vmalloc(gpuobj->im_pramin->size);
+		if (!gpuobj->im_backing_suspend) {
+			nouveau_gpuobj_resume(dev);
+			return -ENOMEM;
+		}
+
+		dev_priv->engine.instmem.prepare_access(dev, false);
+		for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
+			gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i);
+		dev_priv->engine.instmem.finish_access(dev);
+	}
+
+	return 0;
+}
+
+void
+nouveau_gpuobj_suspend_cleanup(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *gpuobj;
+
+	if (dev_priv->card_type < NV_50) {
+		vfree(dev_priv->susres.ramin_copy);
+		dev_priv->susres.ramin_copy = NULL;
+		return;
+	}
+
+	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
+		if (!gpuobj->im_backing_suspend)
+			continue;
+
+		vfree(gpuobj->im_backing_suspend);
+		gpuobj->im_backing_suspend = NULL;
+	}
+}
+
+void
+nouveau_gpuobj_resume(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *gpuobj;
+	int i;
+
+	if (dev_priv->card_type < NV_50) {
+		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
+			nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]);
+		nouveau_gpuobj_suspend_cleanup(dev);
+		return;
+	}
+
+	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
+		if (!gpuobj->im_backing_suspend)
+			continue;
+
+		dev_priv->engine.instmem.prepare_access(dev, true);
+		for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
+			nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]);
+		dev_priv->engine.instmem.finish_access(dev);
+	}
+
+	nouveau_gpuobj_suspend_cleanup(dev);
+}
+
+int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_nouveau_grobj_alloc *init = data;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nouveau_pgraph_object_class *grc;
+	struct nouveau_gpuobj *gr = NULL;
+	struct nouveau_channel *chan;
+	int ret;
+
+	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
+
+	if (init->handle == ~0)
+		return -EINVAL;
+
+	grc = pgraph->grclass;
+	while (grc->id) {
+		if (grc->id == init->class)
+			break;
+		grc++;
+	}
+
+	if (!grc->id) {
+		NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class);
+		return -EPERM;
+	}
+
+	if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0)
+		return -EEXIST;
+
+	if (!grc->software)
+		ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
+	else
+		ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);
+
+	if (ret) {
+		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
+			 ret, init->channel, init->handle);
+		return ret;
+	}
+
+	ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL);
+	if (ret) {
+		NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
+			 ret, init->channel, init->handle);
+		nouveau_gpuobj_del(dev, &gr);
+		return ret;
+	}
+
+	return 0;
+}
+
+int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_nouveau_gpuobj_free *objfree = data;
+	struct nouveau_gpuobj_ref *ref;
+	struct nouveau_channel *chan;
+	int ret;
+
+	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
+
+	ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref);
+	if (ret)
+		return ret;
+	nouveau_gpuobj_ref_del(dev, &ref);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
new file mode 100644
index 0000000..fa1b0e7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -0,0 +1,836 @@
+#define NV03_BOOT_0                                        0x00100000
+#    define NV03_BOOT_0_RAM_AMOUNT                         0x00000003
+#    define NV03_BOOT_0_RAM_AMOUNT_8MB                     0x00000000
+#    define NV03_BOOT_0_RAM_AMOUNT_2MB                     0x00000001
+#    define NV03_BOOT_0_RAM_AMOUNT_4MB                     0x00000002
+#    define NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM               0x00000003
+#    define NV04_BOOT_0_RAM_AMOUNT_32MB                    0x00000000
+#    define NV04_BOOT_0_RAM_AMOUNT_4MB                     0x00000001
+#    define NV04_BOOT_0_RAM_AMOUNT_8MB                     0x00000002
+#    define NV04_BOOT_0_RAM_AMOUNT_16MB                    0x00000003
+
+#define NV04_FIFO_DATA                                     0x0010020c
+#    define NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK              0xfff00000
+#    define NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT             20
+
+#define NV_RAMIN                                           0x00700000
+
+#define NV_RAMHT_HANDLE_OFFSET                             0
+#define NV_RAMHT_CONTEXT_OFFSET                            4
+#    define NV_RAMHT_CONTEXT_VALID                         (1<<31)
+#    define NV_RAMHT_CONTEXT_CHANNEL_SHIFT                 24
+#    define NV_RAMHT_CONTEXT_ENGINE_SHIFT                  16
+#        define NV_RAMHT_CONTEXT_ENGINE_SOFTWARE           0
+#        define NV_RAMHT_CONTEXT_ENGINE_GRAPHICS           1
+#    define NV_RAMHT_CONTEXT_INSTANCE_SHIFT                0
+#    define NV40_RAMHT_CONTEXT_CHANNEL_SHIFT               23
+#    define NV40_RAMHT_CONTEXT_ENGINE_SHIFT                20
+#    define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT              0
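+
+/* Hedged example (illustrative, not part of the original file): on
+ * pre-NV40 cards a graphics object at instance 0x1230 on channel 2 gets
+ * the context word
+ *   NV_RAMHT_CONTEXT_VALID | (0x1230 >> 4) |
+ *   (2 << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
+ *   (NV_RAMHT_CONTEXT_ENGINE_GRAPHICS << NV_RAMHT_CONTEXT_ENGINE_SHIFT)
+ * mirroring what nouveau_ramht_insert() in nouveau_object.c builds.
+ */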
+
+/* DMA object defines */
+#define NV_DMA_ACCESS_RW 0
+#define NV_DMA_ACCESS_RO 1
+#define NV_DMA_ACCESS_WO 2
+#define NV_DMA_TARGET_VIDMEM 0
+#define NV_DMA_TARGET_PCI    2
+#define NV_DMA_TARGET_AGP    3
+/* The following is not a real value used by the card, it's changed by
+ * nouveau_object_dma_create */
+#define NV_DMA_TARGET_PCI_NONLINEAR 8
+
+/* Some object classes we care about in the drm */
+#define NV_CLASS_DMA_FROM_MEMORY                           0x00000002
+#define NV_CLASS_DMA_TO_MEMORY                             0x00000003
+#define NV_CLASS_NULL                                      0x00000030
+#define NV_CLASS_DMA_IN_MEMORY                             0x0000003D
+
+#define NV03_USER(i)                             (0x00800000+(i*NV03_USER_SIZE))
+#define NV03_USER__SIZE                                                       16
+#define NV10_USER__SIZE                                                       32
+#define NV03_USER_SIZE                                                0x00010000
+#define NV03_USER_DMA_PUT(i)                     (0x00800040+(i*NV03_USER_SIZE))
+#define NV03_USER_DMA_PUT__SIZE                                               16
+#define NV10_USER_DMA_PUT__SIZE                                               32
+#define NV03_USER_DMA_GET(i)                     (0x00800044+(i*NV03_USER_SIZE))
+#define NV03_USER_DMA_GET__SIZE                                               16
+#define NV10_USER_DMA_GET__SIZE                                               32
+#define NV03_USER_REF_CNT(i)                     (0x00800048+(i*NV03_USER_SIZE))
+#define NV03_USER_REF_CNT__SIZE                                               16
+#define NV10_USER_REF_CNT__SIZE                                               32
+
+#define NV40_USER(i)                             (0x00c00000+(i*NV40_USER_SIZE))
+#define NV40_USER_SIZE                                                0x00001000
+#define NV40_USER_DMA_PUT(i)                     (0x00c00040+(i*NV40_USER_SIZE))
+#define NV40_USER_DMA_PUT__SIZE                                               32
+#define NV40_USER_DMA_GET(i)                     (0x00c00044+(i*NV40_USER_SIZE))
+#define NV40_USER_DMA_GET__SIZE                                               32
+#define NV40_USER_REF_CNT(i)                     (0x00c00048+(i*NV40_USER_SIZE))
+#define NV40_USER_REF_CNT__SIZE                                               32
+
+#define NV50_USER(i)                             (0x00c00000+(i*NV50_USER_SIZE))
+#define NV50_USER_SIZE                                                0x00002000
+#define NV50_USER_DMA_PUT(i)                     (0x00c00040+(i*NV50_USER_SIZE))
+#define NV50_USER_DMA_PUT__SIZE                                              128
+#define NV50_USER_DMA_GET(i)                     (0x00c00044+(i*NV50_USER_SIZE))
+#define NV50_USER_DMA_GET__SIZE                                              128
+#define NV50_USER_REF_CNT(i)                     (0x00c00048+(i*NV50_USER_SIZE))
+#define NV50_USER_REF_CNT__SIZE                                              128
+
+#define NV03_FIFO_SIZE                                     0x8000UL
+
+#define NV03_PMC_BOOT_0                                    0x00000000
+#define NV03_PMC_BOOT_1                                    0x00000004
+#define NV03_PMC_INTR_0                                    0x00000100
+#    define NV_PMC_INTR_0_PFIFO_PENDING                        (1<<8)
+#    define NV_PMC_INTR_0_PGRAPH_PENDING                      (1<<12)
+#    define NV_PMC_INTR_0_NV50_I2C_PENDING                    (1<<21)
+#    define NV_PMC_INTR_0_CRTC0_PENDING                       (1<<24)
+#    define NV_PMC_INTR_0_CRTC1_PENDING                       (1<<25)
+#    define NV_PMC_INTR_0_NV50_DISPLAY_PENDING                (1<<26)
+#    define NV_PMC_INTR_0_CRTCn_PENDING                       (3<<24)
+#define NV03_PMC_INTR_EN_0                                 0x00000140
+#    define NV_PMC_INTR_EN_0_MASTER_ENABLE                     (1<<0)
+#define NV03_PMC_ENABLE                                    0x00000200
+#    define NV_PMC_ENABLE_PFIFO                                (1<<8)
+#    define NV_PMC_ENABLE_PGRAPH                              (1<<12)
+/* Disabling the below bit breaks newer (G7X only?) mobile chipsets;
+ * the card will hang early on in the X init process.
+ */
+#    define NV_PMC_ENABLE_UNK13                               (1<<13)
+#define NV40_PMC_BACKLIGHT				   0x000015f0
+#	define NV40_PMC_BACKLIGHT_MASK			   0x001f0000
+#define NV40_PMC_1700                                      0x00001700
+#define NV40_PMC_1704                                      0x00001704
+#define NV40_PMC_1708                                      0x00001708
+#define NV40_PMC_170C                                      0x0000170C
+
+/* probably PMC ? */
+#define NV50_PUNK_BAR0_PRAMIN                              0x00001700
+#define NV50_PUNK_BAR_CFG_BASE                             0x00001704
+#define NV50_PUNK_BAR_CFG_BASE_VALID                          (1<<30)
+#define NV50_PUNK_BAR1_CTXDMA                              0x00001708
+#define NV50_PUNK_BAR1_CTXDMA_VALID                           (1<<31)
+#define NV50_PUNK_BAR3_CTXDMA                              0x0000170C
+#define NV50_PUNK_BAR3_CTXDMA_VALID                           (1<<31)
+#define NV50_PUNK_UNK1710                                  0x00001710
+
+#define NV04_PBUS_PCI_NV_1                                 0x00001804
+#define NV04_PBUS_PCI_NV_19                                0x0000184C
+#define NV04_PBUS_PCI_NV_20				0x00001850
+#	define NV04_PBUS_PCI_NV_20_ROM_SHADOW_DISABLED		(0 << 0)
+#	define NV04_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED		(1 << 0)
+
+#define NV04_PTIMER_INTR_0                                 0x00009100
+#define NV04_PTIMER_INTR_EN_0                              0x00009140
+#define NV04_PTIMER_NUMERATOR                              0x00009200
+#define NV04_PTIMER_DENOMINATOR                            0x00009210
+#define NV04_PTIMER_TIME_0                                 0x00009400
+#define NV04_PTIMER_TIME_1                                 0x00009410
+#define NV04_PTIMER_ALARM_0                                0x00009420
+
+#define NV04_PFB_CFG0                                      0x00100200
+#define NV04_PFB_CFG1                                      0x00100204
+#define NV40_PFB_020C                                      0x0010020C
+#define NV10_PFB_TILE(i)                                   (0x00100240 + (i*16))
+#define NV10_PFB_TILE__SIZE                                8
+#define NV10_PFB_TLIMIT(i)                                 (0x00100244 + (i*16))
+#define NV10_PFB_TSIZE(i)                                  (0x00100248 + (i*16))
+#define NV10_PFB_TSTATUS(i)                                (0x0010024C + (i*16))
+#define NV10_PFB_CLOSE_PAGE2                               0x0010033C
+#define NV40_PFB_TILE(i)                                   (0x00100600 + (i*16))
+#define NV40_PFB_TILE__SIZE_0                              12
+#define NV40_PFB_TILE__SIZE_1                              15
+#define NV40_PFB_TLIMIT(i)                                 (0x00100604 + (i*16))
+#define NV40_PFB_TSIZE(i)                                  (0x00100608 + (i*16))
+#define NV40_PFB_TSTATUS(i)                                (0x0010060C + (i*16))
+#define NV40_PFB_UNK_800					0x00100800
+
+#define NV04_PGRAPH_DEBUG_0                                0x00400080
+#define NV04_PGRAPH_DEBUG_1                                0x00400084
+#define NV04_PGRAPH_DEBUG_2                                0x00400088
+#define NV04_PGRAPH_DEBUG_3                                0x0040008c
+#define NV10_PGRAPH_DEBUG_4                                0x00400090
+#define NV03_PGRAPH_INTR                                   0x00400100
+#define NV03_PGRAPH_NSTATUS                                0x00400104
+#    define NV04_PGRAPH_NSTATUS_STATE_IN_USE                  (1<<11)
+#    define NV04_PGRAPH_NSTATUS_INVALID_STATE                 (1<<12)
+#    define NV04_PGRAPH_NSTATUS_BAD_ARGUMENT                  (1<<13)
+#    define NV04_PGRAPH_NSTATUS_PROTECTION_FAULT              (1<<14)
+#    define NV10_PGRAPH_NSTATUS_STATE_IN_USE                  (1<<23)
+#    define NV10_PGRAPH_NSTATUS_INVALID_STATE                 (1<<24)
+#    define NV10_PGRAPH_NSTATUS_BAD_ARGUMENT                  (1<<25)
+#    define NV10_PGRAPH_NSTATUS_PROTECTION_FAULT              (1<<26)
+#define NV03_PGRAPH_NSOURCE                                0x00400108
+#    define NV03_PGRAPH_NSOURCE_NOTIFICATION                   (1<<0)
+#    define NV03_PGRAPH_NSOURCE_DATA_ERROR                     (1<<1)
+#    define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR               (1<<2)
+#    define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION                (1<<3)
+#    define NV03_PGRAPH_NSOURCE_LIMIT_COLOR                    (1<<4)
+#    define NV03_PGRAPH_NSOURCE_LIMIT_ZETA                     (1<<5)
+#    define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD                   (1<<6)
+#    define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION               (1<<7)
+#    define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION               (1<<8)
+#    define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION               (1<<9)
+#    define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION               (1<<10)
+#    define NV03_PGRAPH_NSOURCE_STATE_INVALID                 (1<<11)
+#    define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY                 (1<<12)
+#    define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE                 (1<<13)
+#    define NV03_PGRAPH_NSOURCE_METHOD_CNT                    (1<<14)
+#    define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION              (1<<15)
+#    define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION            (1<<16)
+#    define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A                   (1<<17)
+#    define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B                   (1<<18)
+#define NV03_PGRAPH_INTR_EN                                0x00400140
+#define NV40_PGRAPH_INTR_EN                                0x0040013C
+#    define NV_PGRAPH_INTR_NOTIFY                              (1<<0)
+#    define NV_PGRAPH_INTR_MISSING_HW                          (1<<4)
+#    define NV_PGRAPH_INTR_CONTEXT_SWITCH                     (1<<12)
+#    define NV_PGRAPH_INTR_BUFFER_NOTIFY                      (1<<16)
+#    define NV_PGRAPH_INTR_ERROR                              (1<<20)
+#define NV10_PGRAPH_CTX_CONTROL                            0x00400144
+#define NV10_PGRAPH_CTX_USER                               0x00400148
+#define NV10_PGRAPH_CTX_SWITCH1                            0x0040014C
+#define NV10_PGRAPH_CTX_SWITCH2                            0x00400150
+#define NV10_PGRAPH_CTX_SWITCH3                            0x00400154
+#define NV10_PGRAPH_CTX_SWITCH4                            0x00400158
+#define NV10_PGRAPH_CTX_SWITCH5                            0x0040015C
+#define NV04_PGRAPH_CTX_SWITCH1                            0x00400160
+#define NV10_PGRAPH_CTX_CACHE1                             0x00400160
+#define NV04_PGRAPH_CTX_SWITCH2                            0x00400164
+#define NV04_PGRAPH_CTX_SWITCH3                            0x00400168
+#define NV04_PGRAPH_CTX_SWITCH4                            0x0040016C
+#define NV04_PGRAPH_CTX_CONTROL                            0x00400170
+#define NV04_PGRAPH_CTX_USER                               0x00400174
+#define NV04_PGRAPH_CTX_CACHE1                             0x00400180
+#define NV10_PGRAPH_CTX_CACHE2                             0x00400180
+#define NV03_PGRAPH_CTX_CONTROL                            0x00400190
+#define NV03_PGRAPH_CTX_USER                               0x00400194
+#define NV04_PGRAPH_CTX_CACHE2                             0x004001A0
+#define NV10_PGRAPH_CTX_CACHE3                             0x004001A0
+#define NV04_PGRAPH_CTX_CACHE3                             0x004001C0
+#define NV10_PGRAPH_CTX_CACHE4                             0x004001C0
+#define NV04_PGRAPH_CTX_CACHE4                             0x004001E0
+#define NV10_PGRAPH_CTX_CACHE5                             0x004001E0
+#define NV40_PGRAPH_CTXCTL_0304                            0x00400304
+#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX                   0x00000001
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT                      0x00400308
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK              0xff000000
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT                     24
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK              0x00ffffff
+#define NV40_PGRAPH_CTXCTL_0310                            0x00400310
+#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE                  0x00000020
+#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD                  0x00000040
+#define NV40_PGRAPH_CTXCTL_030C                            0x0040030c
+#define NV40_PGRAPH_CTXCTL_UCODE_INDEX                     0x00400324
+#define NV40_PGRAPH_CTXCTL_UCODE_DATA                      0x00400328
+#define NV40_PGRAPH_CTXCTL_CUR                             0x0040032c
+#define NV40_PGRAPH_CTXCTL_CUR_LOADED                      0x01000000
+#define NV40_PGRAPH_CTXCTL_CUR_INSTANCE                    0x000FFFFF
+#define NV40_PGRAPH_CTXCTL_NEXT                            0x00400330
+#define NV40_PGRAPH_CTXCTL_NEXT_INSTANCE                   0x000fffff
+#define NV50_PGRAPH_CTXCTL_CUR                             0x0040032c
+#define NV50_PGRAPH_CTXCTL_CUR_LOADED                      0x80000000
+#define NV50_PGRAPH_CTXCTL_CUR_INSTANCE                    0x00ffffff
+#define NV50_PGRAPH_CTXCTL_NEXT                            0x00400330
+#define NV50_PGRAPH_CTXCTL_NEXT_INSTANCE                   0x00ffffff
+#define NV03_PGRAPH_ABS_X_RAM                              0x00400400
+#define NV03_PGRAPH_ABS_Y_RAM                              0x00400480
+#define NV03_PGRAPH_X_MISC                                 0x00400500
+#define NV03_PGRAPH_Y_MISC                                 0x00400504
+#define NV04_PGRAPH_VALID1                                 0x00400508
+#define NV04_PGRAPH_SOURCE_COLOR                           0x0040050C
+#define NV04_PGRAPH_MISC24_0                               0x00400510
+#define NV03_PGRAPH_XY_LOGIC_MISC0                         0x00400514
+#define NV03_PGRAPH_XY_LOGIC_MISC1                         0x00400518
+#define NV03_PGRAPH_XY_LOGIC_MISC2                         0x0040051C
+#define NV03_PGRAPH_XY_LOGIC_MISC3                         0x00400520
+#define NV03_PGRAPH_CLIPX_0                                0x00400524
+#define NV03_PGRAPH_CLIPX_1                                0x00400528
+#define NV03_PGRAPH_CLIPY_0                                0x0040052C
+#define NV03_PGRAPH_CLIPY_1                                0x00400530
+#define NV03_PGRAPH_ABS_ICLIP_XMAX                         0x00400534
+#define NV03_PGRAPH_ABS_ICLIP_YMAX                         0x00400538
+#define NV03_PGRAPH_ABS_UCLIP_XMIN                         0x0040053C
+#define NV03_PGRAPH_ABS_UCLIP_YMIN                         0x00400540
+#define NV03_PGRAPH_ABS_UCLIP_XMAX                         0x00400544
+#define NV03_PGRAPH_ABS_UCLIP_YMAX                         0x00400548
+#define NV03_PGRAPH_ABS_UCLIPA_XMIN                        0x00400560
+#define NV03_PGRAPH_ABS_UCLIPA_YMIN                        0x00400564
+#define NV03_PGRAPH_ABS_UCLIPA_XMAX                        0x00400568
+#define NV03_PGRAPH_ABS_UCLIPA_YMAX                        0x0040056C
+#define NV04_PGRAPH_MISC24_1                               0x00400570
+#define NV04_PGRAPH_MISC24_2                               0x00400574
+#define NV04_PGRAPH_VALID2                                 0x00400578
+#define NV04_PGRAPH_PASSTHRU_0                             0x0040057C
+#define NV04_PGRAPH_PASSTHRU_1                             0x00400580
+#define NV04_PGRAPH_PASSTHRU_2                             0x00400584
+#define NV10_PGRAPH_DIMX_TEXTURE                           0x00400588
+#define NV10_PGRAPH_WDIMX_TEXTURE                          0x0040058C
+#define NV04_PGRAPH_COMBINE_0_ALPHA                        0x00400590
+#define NV04_PGRAPH_COMBINE_0_COLOR                        0x00400594
+#define NV04_PGRAPH_COMBINE_1_ALPHA                        0x00400598
+#define NV04_PGRAPH_COMBINE_1_COLOR                        0x0040059C
+#define NV04_PGRAPH_FORMAT_0                               0x004005A8
+#define NV04_PGRAPH_FORMAT_1                               0x004005AC
+#define NV04_PGRAPH_FILTER_0                               0x004005B0
+#define NV04_PGRAPH_FILTER_1                               0x004005B4
+#define NV03_PGRAPH_MONO_COLOR0                            0x00400600
+#define NV04_PGRAPH_ROP3                                   0x00400604
+#define NV04_PGRAPH_BETA_AND                               0x00400608
+#define NV04_PGRAPH_BETA_PREMULT                           0x0040060C
+#define NV04_PGRAPH_LIMIT_VIOL_PIX                         0x00400610
+#define NV04_PGRAPH_FORMATS                                0x00400618
+#define NV10_PGRAPH_DEBUG_2                                0x00400620
+#define NV04_PGRAPH_BOFFSET0                               0x00400640
+#define NV04_PGRAPH_BOFFSET1                               0x00400644
+#define NV04_PGRAPH_BOFFSET2                               0x00400648
+#define NV04_PGRAPH_BOFFSET3                               0x0040064C
+#define NV04_PGRAPH_BOFFSET4                               0x00400650
+#define NV04_PGRAPH_BOFFSET5                               0x00400654
+#define NV04_PGRAPH_BBASE0                                 0x00400658
+#define NV04_PGRAPH_BBASE1                                 0x0040065C
+#define NV04_PGRAPH_BBASE2                                 0x00400660
+#define NV04_PGRAPH_BBASE3                                 0x00400664
+#define NV04_PGRAPH_BBASE4                                 0x00400668
+#define NV04_PGRAPH_BBASE5                                 0x0040066C
+#define NV04_PGRAPH_BPITCH0                                0x00400670
+#define NV04_PGRAPH_BPITCH1                                0x00400674
+#define NV04_PGRAPH_BPITCH2                                0x00400678
+#define NV04_PGRAPH_BPITCH3                                0x0040067C
+#define NV04_PGRAPH_BPITCH4                                0x00400680
+#define NV04_PGRAPH_BLIMIT0                                0x00400684
+#define NV04_PGRAPH_BLIMIT1                                0x00400688
+#define NV04_PGRAPH_BLIMIT2                                0x0040068C
+#define NV04_PGRAPH_BLIMIT3                                0x00400690
+#define NV04_PGRAPH_BLIMIT4                                0x00400694
+#define NV04_PGRAPH_BLIMIT5                                0x00400698
+#define NV04_PGRAPH_BSWIZZLE2                              0x0040069C
+#define NV04_PGRAPH_BSWIZZLE5                              0x004006A0
+#define NV03_PGRAPH_STATUS                                 0x004006B0
+#define NV04_PGRAPH_STATUS                                 0x00400700
+#define NV04_PGRAPH_TRAPPED_ADDR                           0x00400704
+#define NV04_PGRAPH_TRAPPED_DATA                           0x00400708
+#define NV04_PGRAPH_SURFACE                                0x0040070C
+#define NV10_PGRAPH_TRAPPED_DATA_HIGH                      0x0040070C
+#define NV04_PGRAPH_STATE                                  0x00400710
+#define NV10_PGRAPH_SURFACE                                0x00400710
+#define NV04_PGRAPH_NOTIFY                                 0x00400714
+#define NV10_PGRAPH_STATE                                  0x00400714
+#define NV10_PGRAPH_NOTIFY                                 0x00400718
+
+#define NV04_PGRAPH_FIFO                                   0x00400720
+
+#define NV04_PGRAPH_BPIXEL                                 0x00400724
+#define NV10_PGRAPH_RDI_INDEX                              0x00400750
+#define NV04_PGRAPH_FFINTFC_ST2                            0x00400754
+#define NV10_PGRAPH_RDI_DATA                               0x00400754
+#define NV04_PGRAPH_DMA_PITCH                              0x00400760
+#define NV10_PGRAPH_FFINTFC_ST2                            0x00400764
+#define NV04_PGRAPH_DVD_COLORFMT                           0x00400764
+#define NV04_PGRAPH_SCALED_FORMAT                          0x00400768
+#define NV10_PGRAPH_DMA_PITCH                              0x00400770
+#define NV10_PGRAPH_DVD_COLORFMT                           0x00400774
+#define NV10_PGRAPH_SCALED_FORMAT                          0x00400778
+#define NV20_PGRAPH_CHANNEL_CTX_TABLE                      0x00400780
+#define NV20_PGRAPH_CHANNEL_CTX_POINTER                    0x00400784
+#define NV20_PGRAPH_CHANNEL_CTX_XFER                       0x00400788
+#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD                  0x00000001
+#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE                  0x00000002
+#define NV04_PGRAPH_PATT_COLOR0                            0x00400800
+#define NV04_PGRAPH_PATT_COLOR1                            0x00400804
+#define NV04_PGRAPH_PATTERN                                0x00400808
+#define NV04_PGRAPH_PATTERN_SHAPE                          0x00400810
+#define NV04_PGRAPH_CHROMA                                 0x00400814
+#define NV04_PGRAPH_CONTROL0                               0x00400818
+#define NV04_PGRAPH_CONTROL1                               0x0040081C
+#define NV04_PGRAPH_CONTROL2                               0x00400820
+#define NV04_PGRAPH_BLEND                                  0x00400824
+#define NV04_PGRAPH_STORED_FMT                             0x00400830
+#define NV04_PGRAPH_PATT_COLORRAM                          0x00400900
+#define NV40_PGRAPH_TILE0(i)                               (0x00400900 + (i*16))
+#define NV40_PGRAPH_TLIMIT0(i)                             (0x00400904 + (i*16))
+#define NV40_PGRAPH_TSIZE0(i)                              (0x00400908 + (i*16))
+#define NV40_PGRAPH_TSTATUS0(i)                            (0x0040090C + (i*16))
+#define NV10_PGRAPH_TILE(i)                                (0x00400B00 + (i*16))
+#define NV10_PGRAPH_TLIMIT(i)                              (0x00400B04 + (i*16))
+#define NV10_PGRAPH_TSIZE(i)                               (0x00400B08 + (i*16))
+#define NV10_PGRAPH_TSTATUS(i)                             (0x00400B0C + (i*16))
+#define NV04_PGRAPH_U_RAM                                  0x00400D00
+#define NV47_PGRAPH_TILE0(i)                               (0x00400D00 + (i*16))
+#define NV47_PGRAPH_TLIMIT0(i)                             (0x00400D04 + (i*16))
+#define NV47_PGRAPH_TSIZE0(i)                              (0x00400D08 + (i*16))
+#define NV47_PGRAPH_TSTATUS0(i)                            (0x00400D0C + (i*16))
+#define NV04_PGRAPH_V_RAM                                  0x00400D40
+#define NV04_PGRAPH_W_RAM                                  0x00400D80
+#define NV10_PGRAPH_COMBINER0_IN_ALPHA                     0x00400E40
+#define NV10_PGRAPH_COMBINER1_IN_ALPHA                     0x00400E44
+#define NV10_PGRAPH_COMBINER0_IN_RGB                       0x00400E48
+#define NV10_PGRAPH_COMBINER1_IN_RGB                       0x00400E4C
+#define NV10_PGRAPH_COMBINER_COLOR0                        0x00400E50
+#define NV10_PGRAPH_COMBINER_COLOR1                        0x00400E54
+#define NV10_PGRAPH_COMBINER0_OUT_ALPHA                    0x00400E58
+#define NV10_PGRAPH_COMBINER1_OUT_ALPHA                    0x00400E5C
+#define NV10_PGRAPH_COMBINER0_OUT_RGB                      0x00400E60
+#define NV10_PGRAPH_COMBINER1_OUT_RGB                      0x00400E64
+#define NV10_PGRAPH_COMBINER_FINAL0                        0x00400E68
+#define NV10_PGRAPH_COMBINER_FINAL1                        0x00400E6C
+#define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL                  0x00400F00
+#define NV10_PGRAPH_WINDOWCLIP_VERTICAL                    0x00400F20
+#define NV10_PGRAPH_XFMODE0                                0x00400F40
+#define NV10_PGRAPH_XFMODE1                                0x00400F44
+#define NV10_PGRAPH_GLOBALSTATE0                           0x00400F48
+#define NV10_PGRAPH_GLOBALSTATE1                           0x00400F4C
+#define NV10_PGRAPH_PIPE_ADDRESS                           0x00400F50
+#define NV10_PGRAPH_PIPE_DATA                              0x00400F54
+#define NV04_PGRAPH_DMA_START_0                            0x00401000
+#define NV04_PGRAPH_DMA_START_1                            0x00401004
+#define NV04_PGRAPH_DMA_LENGTH                             0x00401008
+#define NV04_PGRAPH_DMA_MISC                               0x0040100C
+#define NV04_PGRAPH_DMA_DATA_0                             0x00401020
+#define NV04_PGRAPH_DMA_DATA_1                             0x00401024
+#define NV04_PGRAPH_DMA_RM                                 0x00401030
+#define NV04_PGRAPH_DMA_A_XLATE_INST                       0x00401040
+#define NV04_PGRAPH_DMA_A_CONTROL                          0x00401044
+#define NV04_PGRAPH_DMA_A_LIMIT                            0x00401048
+#define NV04_PGRAPH_DMA_A_TLB_PTE                          0x0040104C
+#define NV04_PGRAPH_DMA_A_TLB_TAG                          0x00401050
+#define NV04_PGRAPH_DMA_A_ADJ_OFFSET                       0x00401054
+#define NV04_PGRAPH_DMA_A_OFFSET                           0x00401058
+#define NV04_PGRAPH_DMA_A_SIZE                             0x0040105C
+#define NV04_PGRAPH_DMA_A_Y_SIZE                           0x00401060
+#define NV04_PGRAPH_DMA_B_XLATE_INST                       0x00401080
+#define NV04_PGRAPH_DMA_B_CONTROL                          0x00401084
+#define NV04_PGRAPH_DMA_B_LIMIT                            0x00401088
+#define NV04_PGRAPH_DMA_B_TLB_PTE                          0x0040108C
+#define NV04_PGRAPH_DMA_B_TLB_TAG                          0x00401090
+#define NV04_PGRAPH_DMA_B_ADJ_OFFSET                       0x00401094
+#define NV04_PGRAPH_DMA_B_OFFSET                           0x00401098
+#define NV04_PGRAPH_DMA_B_SIZE                             0x0040109C
+#define NV04_PGRAPH_DMA_B_Y_SIZE                           0x004010A0
+#define NV40_PGRAPH_TILE1(i)                               (0x00406900 + (i*16))
+#define NV40_PGRAPH_TLIMIT1(i)                             (0x00406904 + (i*16))
+#define NV40_PGRAPH_TSIZE1(i)                              (0x00406908 + (i*16))
+#define NV40_PGRAPH_TSTATUS1(i)                            (0x0040690C + (i*16))
+
+/* It's a guess that this works on NV03. Confirmed on NV04, though */
+#define NV04_PFIFO_DELAY_0                                 0x00002040
+#define NV04_PFIFO_DMA_TIMESLICE                           0x00002044
+#define NV04_PFIFO_NEXT_CHANNEL                            0x00002050
+#define NV03_PFIFO_INTR_0                                  0x00002100
+#define NV03_PFIFO_INTR_EN_0                               0x00002140
+#    define NV_PFIFO_INTR_CACHE_ERROR                          (1<<0)
+#    define NV_PFIFO_INTR_RUNOUT                               (1<<4)
+#    define NV_PFIFO_INTR_RUNOUT_OVERFLOW                      (1<<8)
+#    define NV_PFIFO_INTR_DMA_PUSHER                          (1<<12)
+#    define NV_PFIFO_INTR_DMA_PT                              (1<<16)
+#    define NV_PFIFO_INTR_SEMAPHORE                           (1<<20)
+#    define NV_PFIFO_INTR_ACQUIRE_TIMEOUT                     (1<<24)
+#define NV03_PFIFO_RAMHT                                   0x00002210
+#define NV03_PFIFO_RAMFC                                   0x00002214
+#define NV03_PFIFO_RAMRO                                   0x00002218
+#define NV40_PFIFO_RAMFC                                   0x00002220
+#define NV03_PFIFO_CACHES                                  0x00002500
+#define NV04_PFIFO_MODE                                    0x00002504
+#define NV04_PFIFO_DMA                                     0x00002508
+#define NV04_PFIFO_SIZE                                    0x0000250c
+#define NV50_PFIFO_CTX_TABLE(c)                        (0x2600+(c)*4)
+#define NV50_PFIFO_CTX_TABLE__SIZE                                128
+#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED                  (1<<31)
+#define NV50_PFIFO_CTX_TABLE_UNK30_BAD                        (1<<30)
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80             0x0FFFFFFF
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84             0x00FFFFFF
+#define NV03_PFIFO_CACHE0_PUSH0                            0x00003000
+#define NV03_PFIFO_CACHE0_PULL0                            0x00003040
+#define NV04_PFIFO_CACHE0_PULL0                            0x00003050
+#define NV04_PFIFO_CACHE0_PULL1                            0x00003054
+#define NV03_PFIFO_CACHE1_PUSH0                            0x00003200
+#define NV03_PFIFO_CACHE1_PUSH1                            0x00003204
+#define NV03_PFIFO_CACHE1_PUSH1_DMA                            (1<<8)
+#define NV40_PFIFO_CACHE1_PUSH1_DMA                           (1<<16)
+#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000000f
+#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000001f
+#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000007f
+#define NV03_PFIFO_CACHE1_PUT                              0x00003210
+#define NV04_PFIFO_CACHE1_DMA_PUSH                         0x00003220
+#define NV04_PFIFO_CACHE1_DMA_FETCH                        0x00003224
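+/* The values below suggest FETCH_TRIG encodes (trigger bytes - 8),
+ * FETCH_SIZE encodes ((size / 32) - 1) << 13 and MAX_REQS the raw request
+ * count << 16; this is inferred from the defines, not from docs. */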
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES         0x00000000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES        0x00000008
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES        0x00000010
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES        0x00000018
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES        0x00000020
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES        0x00000028
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES        0x00000030
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES        0x00000038
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES        0x00000040
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES        0x00000048
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES        0x00000050
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES        0x00000058
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES       0x00000060
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES       0x00000068
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES       0x00000070
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES       0x00000078
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES       0x00000080
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES       0x00000088
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES       0x00000090
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES       0x00000098
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES       0x000000A0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES       0x000000A8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES       0x000000B0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES       0x000000B8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES       0x000000C0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES       0x000000C8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES       0x000000D0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES       0x000000D8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES       0x000000E0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES       0x000000E8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES       0x000000F0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES       0x000000F8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE                 0x0000E000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES        0x00000000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES        0x00002000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES        0x00004000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES       0x00006000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES       0x00008000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES       0x0000A000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES       0x0000C000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES       0x0000E000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS             0x001F0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0           0x00000000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1           0x00010000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2           0x00020000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3           0x00030000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4           0x00040000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5           0x00050000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6           0x00060000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7           0x00070000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8           0x00080000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9           0x00090000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10          0x000A0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11          0x000B0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12          0x000C0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13          0x000D0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14          0x000E0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15          0x000F0000
+#    define NV_PFIFO_CACHE1_ENDIAN                         0x80000000
+#    define NV_PFIFO_CACHE1_LITTLE_ENDIAN                  0x7FFFFFFF
+#    define NV_PFIFO_CACHE1_BIG_ENDIAN                     0x80000000
+#define NV04_PFIFO_CACHE1_DMA_STATE                        0x00003228
+#define NV04_PFIFO_CACHE1_DMA_INSTANCE                     0x0000322c
+#define NV04_PFIFO_CACHE1_DMA_CTL                          0x00003230
+#define NV04_PFIFO_CACHE1_DMA_PUT                          0x00003240
+#define NV04_PFIFO_CACHE1_DMA_GET                          0x00003244
+#define NV10_PFIFO_CACHE1_REF_CNT                          0x00003248
+#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE                   0x0000324C
+#define NV03_PFIFO_CACHE1_PULL0                            0x00003240
+#define NV04_PFIFO_CACHE1_PULL0                            0x00003250
+#define NV03_PFIFO_CACHE1_PULL1                            0x00003250
+#define NV04_PFIFO_CACHE1_PULL1                            0x00003254
+#define NV04_PFIFO_CACHE1_HASH                             0x00003258
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT                  0x00003260
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP                0x00003264
+#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE                    0x00003268
+#define NV10_PFIFO_CACHE1_SEMAPHORE                        0x0000326C
+#define NV03_PFIFO_CACHE1_GET                              0x00003270
+#define NV04_PFIFO_CACHE1_ENGINE                           0x00003280
+#define NV04_PFIFO_CACHE1_DMA_DCOUNT                       0x000032A0
+#define NV40_PFIFO_GRCTX_INSTANCE                          0x000032E0
+#define NV40_PFIFO_UNK32E4                                 0x000032E4
+#define NV04_PFIFO_CACHE1_METHOD(i)                (0x00003800+(i*8))
+#define NV04_PFIFO_CACHE1_DATA(i)                  (0x00003804+(i*8))
+#define NV40_PFIFO_CACHE1_METHOD(i)                (0x00090000+(i*8))
+#define NV40_PFIFO_CACHE1_DATA(i)                  (0x00090004+(i*8))
+
+#define NV_CRTC0_INTSTAT                                   0x00600100
+#define NV_CRTC0_INTEN                                     0x00600140
+#define NV_CRTC1_INTSTAT                                   0x00602100
+#define NV_CRTC1_INTEN                                     0x00602140
+#    define NV_CRTC_INTR_VBLANK                                (1<<0)
+
+#define NV04_PRAMIN                                        0x00700000
+
+/* FIFO commands.  These are neither registers nor masks. */
+#define NV03_FIFO_CMD_JUMP                                 0x20000000
+#define NV03_FIFO_CMD_JUMP_OFFSET_MASK                     0x1ffffffc
+#define NV03_FIFO_CMD_REWIND                               (NV03_FIFO_CMD_JUMP | (0 & NV03_FIFO_CMD_JUMP_OFFSET_MASK))
+
+/* This is a partial import from rules-ng; a few things may be duplicated.
+ * Eventually we should import everything from rules-ng.
+ * For the moment, check rules-ng for docs.
+ */
+
+#define NV50_PMC                                            0x00000000
+#define NV50_PMC__LEN                                              0x1
+#define NV50_PMC__ESIZE                                         0x2000
+#    define NV50_PMC_BOOT_0                                 0x00000000
+#        define NV50_PMC_BOOT_0_REVISION                    0x000000ff
+#        define NV50_PMC_BOOT_0_REVISION__SHIFT                      0
+#        define NV50_PMC_BOOT_0_ARCH                        0x0ff00000
+#        define NV50_PMC_BOOT_0_ARCH__SHIFT                         20
+#    define NV50_PMC_INTR_0                                 0x00000100
+#        define NV50_PMC_INTR_0_PFIFO                           (1<<8)
+#        define NV50_PMC_INTR_0_PGRAPH                         (1<<12)
+#        define NV50_PMC_INTR_0_PTIMER                         (1<<20)
+#        define NV50_PMC_INTR_0_HOTPLUG                        (1<<21)
+#        define NV50_PMC_INTR_0_DISPLAY                        (1<<26)
+#    define NV50_PMC_INTR_EN_0                              0x00000140
+#        define NV50_PMC_INTR_EN_0_MASTER                       (1<<0)
+#            define NV50_PMC_INTR_EN_0_MASTER_DISABLED          (0<<0)
+#            define NV50_PMC_INTR_EN_0_MASTER_ENABLED           (1<<0)
+#    define NV50_PMC_ENABLE                                 0x00000200
+#        define NV50_PMC_ENABLE_PFIFO                           (1<<8)
+#        define NV50_PMC_ENABLE_PGRAPH                         (1<<12)
+
+#define NV50_PCONNECTOR                                     0x0000e000
+#define NV50_PCONNECTOR__LEN                                       0x1
+#define NV50_PCONNECTOR__ESIZE                                  0x1000
+#    define NV50_PCONNECTOR_HOTPLUG_INTR                    0x0000e050
+#        define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C0          (1<<0)
+#        define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C1          (1<<1)
+#        define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C2          (1<<2)
+#        define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C3          (1<<3)
+#        define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C0       (1<<16)
+#        define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C1       (1<<17)
+#        define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C2       (1<<18)
+#        define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C3       (1<<19)
+#    define NV50_PCONNECTOR_HOTPLUG_CTRL                    0x0000e054
+#        define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C0          (1<<0)
+#        define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C1          (1<<1)
+#        define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C2          (1<<2)
+#        define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C3          (1<<3)
+#        define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C0       (1<<16)
+#        define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C1       (1<<17)
+#        define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C2       (1<<18)
+#        define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C3       (1<<19)
+#    define NV50_PCONNECTOR_HOTPLUG_STATE                   0x0000e104
+#        define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C0 (1<<2)
+#        define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C1 (1<<6)
+#        define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C2 (1<<10)
+#        define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C3 (1<<14)
+#    define NV50_PCONNECTOR_I2C_PORT_0                      0x0000e138
+#    define NV50_PCONNECTOR_I2C_PORT_1                      0x0000e150
+#    define NV50_PCONNECTOR_I2C_PORT_2                      0x0000e168
+#    define NV50_PCONNECTOR_I2C_PORT_3                      0x0000e180
+#    define NV50_PCONNECTOR_I2C_PORT_4                      0x0000e240
+#    define NV50_PCONNECTOR_I2C_PORT_5                      0x0000e258
+
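+/* DisplayPort AUX channels, one 0x50-byte register block per channel (i).
+ * The field meanings below are reverse-engineered best guesses. */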
+#define NV50_AUXCH_DATA_OUT(i,n)             ((n) * 4 + (i) * 0x50 + 0x0000e4c0)
+#define NV50_AUXCH_DATA_OUT__SIZE                                             4
+#define NV50_AUXCH_DATA_IN(i,n)              ((n) * 4 + (i) * 0x50 + 0x0000e4d0)
+#define NV50_AUXCH_DATA_IN__SIZE                                              4
+#define NV50_AUXCH_ADDR(i)                             ((i) * 0x50 + 0x0000e4e0)
+#define NV50_AUXCH_CTRL(i)                             ((i) * 0x50 + 0x0000e4e4)
+#define NV50_AUXCH_CTRL_LINKSTAT                                     0x01000000
+#define NV50_AUXCH_CTRL_LINKSTAT_NOT_READY                           0x00000000
+#define NV50_AUXCH_CTRL_LINKSTAT_READY                               0x01000000
+#define NV50_AUXCH_CTRL_LINKEN                                       0x00100000
+#define NV50_AUXCH_CTRL_LINKEN_DISABLED                              0x00000000
+#define NV50_AUXCH_CTRL_LINKEN_ENABLED                               0x00100000
+#define NV50_AUXCH_CTRL_EXEC                                         0x00010000
+#define NV50_AUXCH_CTRL_EXEC_COMPLETE                                0x00000000
+#define NV50_AUXCH_CTRL_EXEC_IN_PROCESS                              0x00010000
+#define NV50_AUXCH_CTRL_CMD                                          0x0000f000
+#define NV50_AUXCH_CTRL_CMD_SHIFT                                            12
+#define NV50_AUXCH_CTRL_LEN                                          0x0000000f
+#define NV50_AUXCH_CTRL_LEN_SHIFT                                             0
+#define NV50_AUXCH_STAT(i)                             ((i) * 0x50 + 0x0000e4e8)
+#define NV50_AUXCH_STAT_STATE                                        0x10000000
+#define NV50_AUXCH_STAT_STATE_NOT_READY                              0x00000000
+#define NV50_AUXCH_STAT_STATE_READY                                  0x10000000
+#define NV50_AUXCH_STAT_REPLY                                        0x000f0000
+#define NV50_AUXCH_STAT_REPLY_AUX                                    0x00030000
+#define NV50_AUXCH_STAT_REPLY_AUX_ACK                                0x00000000
+#define NV50_AUXCH_STAT_REPLY_AUX_NACK                               0x00010000
+#define NV50_AUXCH_STAT_REPLY_AUX_DEFER                              0x00020000
+#define NV50_AUXCH_STAT_REPLY_I2C                                    0x000c0000
+#define NV50_AUXCH_STAT_REPLY_I2C_ACK                                0x00000000
+#define NV50_AUXCH_STAT_REPLY_I2C_NACK                               0x00040000
+#define NV50_AUXCH_STAT_REPLY_I2C_DEFER                              0x00080000
+#define NV50_AUXCH_STAT_COUNT                                        0x0000001f
+
+#define NV50_PBUS                                           0x00088000
+#define NV50_PBUS__LEN                                             0x1
+#define NV50_PBUS__ESIZE                                        0x1000
+#    define NV50_PBUS_PCI_ID                                0x00088000
+#        define NV50_PBUS_PCI_ID_VENDOR_ID                  0x0000ffff
+#        define NV50_PBUS_PCI_ID_VENDOR_ID__SHIFT                    0
+#        define NV50_PBUS_PCI_ID_DEVICE_ID                  0xffff0000
+#        define NV50_PBUS_PCI_ID_DEVICE_ID__SHIFT                   16
+
+#define NV50_PFB                                            0x00100000
+#define NV50_PFB__LEN                                              0x1
+#define NV50_PFB__ESIZE                                         0x1000
+
+#define NV50_PEXTDEV                                        0x00101000
+#define NV50_PEXTDEV__LEN                                          0x1
+#define NV50_PEXTDEV__ESIZE                                     0x1000
+
+#define NV50_PROM                                           0x00300000
+#define NV50_PROM__LEN                                             0x1
+#define NV50_PROM__ESIZE                                       0x10000
+
+#define NV50_PGRAPH                                         0x00400000
+#define NV50_PGRAPH__LEN                                           0x1
+#define NV50_PGRAPH__ESIZE                                     0x10000
+
+#define NV50_PDISPLAY                                                0x00610000
+#define NV50_PDISPLAY_OBJECTS                                        0x00610010
+#define NV50_PDISPLAY_INTR_0                                         0x00610020
+#define NV50_PDISPLAY_INTR_1                                         0x00610024
+#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC                             0x0000000c
+#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_SHIFT                                2
+#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(n)                   (1 << ((n) + 2))
+#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0                           0x00000004
+#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1                           0x00000008
+#define NV50_PDISPLAY_INTR_1_CLK_UNK10                               0x00000010
+#define NV50_PDISPLAY_INTR_1_CLK_UNK20                               0x00000020
+#define NV50_PDISPLAY_INTR_1_CLK_UNK40                               0x00000040
+#define NV50_PDISPLAY_INTR_EN                                        0x0061002c
+#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC                            0x0000000c
+#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(n)                   (1 << ((n) + 2))
+#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_0                          0x00000004
+#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_1                          0x00000008
+#define NV50_PDISPLAY_INTR_EN_CLK_UNK10                              0x00000010
+#define NV50_PDISPLAY_INTR_EN_CLK_UNK20                              0x00000020
+#define NV50_PDISPLAY_INTR_EN_CLK_UNK40                              0x00000040
+#define NV50_PDISPLAY_UNK30_CTRL                                     0x00610030
+#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK0                        0x00000200
+#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK1                        0x00000400
+#define NV50_PDISPLAY_UNK30_CTRL_PENDING                             0x80000000
+#define NV50_PDISPLAY_TRAPPED_ADDR                                   0x00610080
+#define NV50_PDISPLAY_TRAPPED_DATA                                   0x00610084
+#define NV50_PDISPLAY_CHANNEL_STAT(i)                  ((i) * 0x10 + 0x00610200)
+#define NV50_PDISPLAY_CHANNEL_STAT_DMA                               0x00000010
+#define NV50_PDISPLAY_CHANNEL_STAT_DMA_DISABLED                      0x00000000
+#define NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED                       0x00000010
+#define NV50_PDISPLAY_CHANNEL_DMA_CB(i)                ((i) * 0x10 + 0x00610204)
+#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION                        0x00000002
+#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM                   0x00000000
+#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_SYSTEM                 0x00000002
+#define NV50_PDISPLAY_CHANNEL_DMA_CB_VALID                           0x00000001
+#define NV50_PDISPLAY_CHANNEL_UNK2(i)                  ((i) * 0x10 + 0x00610208)
+#define NV50_PDISPLAY_CHANNEL_UNK3(i)                  ((i) * 0x10 + 0x0061020c)
+
+#define NV50_PDISPLAY_CURSOR                                         0x00610270
+#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)           ((i) * 0x10 + 0x00610270)
+#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON                         0x00000001
+#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS                     0x00030000
+#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE              0x00010000
+
+#define NV50_PDISPLAY_CTRL_STATE                                     0x00610300
+#define NV50_PDISPLAY_CTRL_STATE_PENDING                             0x80000000
+#define NV50_PDISPLAY_CTRL_STATE_METHOD                              0x00001ffc
+#define NV50_PDISPLAY_CTRL_STATE_ENABLE                              0x00000001
+#define NV50_PDISPLAY_CTRL_VAL                                       0x00610304
+#define NV50_PDISPLAY_UNK_380                                        0x00610380
+#define NV50_PDISPLAY_RAM_AMOUNT                                     0x00610384
+#define NV50_PDISPLAY_UNK_388                                        0x00610388
+#define NV50_PDISPLAY_UNK_38C                                        0x0061038c
+
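+/* Each CRTC register appears twice per head: _P at the base offset and
+ * _C four bytes above it, presumably the pending and current values;
+ * (i) selects the head, stride 0x540. */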
+#define NV50_PDISPLAY_CRTC_P(i, r)        ((i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
+#define NV50_PDISPLAY_CRTC_C(i, r)    (4 + (i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
+#define NV50_PDISPLAY_CRTC_UNK_0A18 /* mthd 0x0900 */                0x00610a18
+#define NV50_PDISPLAY_CRTC_CLUT_MODE                                 0x00610a24
+#define NV50_PDISPLAY_CRTC_INTERLACE                                 0x00610a48
+#define NV50_PDISPLAY_CRTC_SCALE_CTRL                                0x00610a50
+#define NV50_PDISPLAY_CRTC_CURSOR_CTRL                               0x00610a58
+#define NV50_PDISPLAY_CRTC_UNK0A78 /* mthd 0x0904 */                 0x00610a78
+#define NV50_PDISPLAY_CRTC_UNK0AB8                                   0x00610ab8
+#define NV50_PDISPLAY_CRTC_DEPTH                                     0x00610ac8
+#define NV50_PDISPLAY_CRTC_CLOCK                                     0x00610ad0
+#define NV50_PDISPLAY_CRTC_COLOR_CTRL                                0x00610ae0
+#define NV50_PDISPLAY_CRTC_SYNC_START_TO_BLANK_END                   0x00610ae8
+#define NV50_PDISPLAY_CRTC_MODE_UNK1                                 0x00610af0
+#define NV50_PDISPLAY_CRTC_DISPLAY_TOTAL                             0x00610af8
+#define NV50_PDISPLAY_CRTC_SYNC_DURATION                             0x00610b00
+#define NV50_PDISPLAY_CRTC_MODE_UNK2                                 0x00610b08
+#define NV50_PDISPLAY_CRTC_UNK_0B10 /* mthd 0x0828 */                0x00610b10
+#define NV50_PDISPLAY_CRTC_FB_SIZE                                   0x00610b18
+#define NV50_PDISPLAY_CRTC_FB_PITCH                                  0x00610b20
+#define NV50_PDISPLAY_CRTC_FB_PITCH_LINEAR                           0x00100000
+#define NV50_PDISPLAY_CRTC_FB_POS                                    0x00610b28
+#define NV50_PDISPLAY_CRTC_SCALE_CENTER_OFFSET                       0x00610b38
+#define NV50_PDISPLAY_CRTC_REAL_RES                                  0x00610b40
+#define NV50_PDISPLAY_CRTC_SCALE_RES1                                0x00610b48
+#define NV50_PDISPLAY_CRTC_SCALE_RES2                                0x00610b50
+
+#define NV50_PDISPLAY_DAC_MODE_CTRL_P(i)                (0x00610b58 + (i) * 0x8)
+#define NV50_PDISPLAY_DAC_MODE_CTRL_C(i)                (0x00610b5c + (i) * 0x8)
+#define NV50_PDISPLAY_SOR_MODE_CTRL_P(i)                (0x00610b70 + (i) * 0x8)
+#define NV50_PDISPLAY_SOR_MODE_CTRL_C(i)                (0x00610b74 + (i) * 0x8)
+#define NV50_PDISPLAY_DAC_MODE_CTRL2_P(i)               (0x00610bdc + (i) * 0x8)
+#define NV50_PDISPLAY_DAC_MODE_CTRL2_C(i)               (0x00610be0 + (i) * 0x8)
+
+#define NV90_PDISPLAY_SOR_MODE_CTRL_P(i)                (0x00610794 + (i) * 0x8)
+#define NV90_PDISPLAY_SOR_MODE_CTRL_C(i)                (0x00610798 + (i) * 0x8)
+#define NV90_PDISPLAY_DAC_MODE_CTRL_P(i)                (0x00610b58 + (i) * 0x8)
+#define NV90_PDISPLAY_DAC_MODE_CTRL_C(i)                (0x00610b5c + (i) * 0x8)
+#define NV90_PDISPLAY_DAC_MODE_CTRL2_P(i)               (0x00610b80 + (i) * 0x8)
+#define NV90_PDISPLAY_DAC_MODE_CTRL2_C(i)               (0x00610b84 + (i) * 0x8)
+
+#define NV50_PDISPLAY_CRTC_CLK                                       0x00614000
+#define NV50_PDISPLAY_CRTC_CLK_CTRL1(i)                 ((i) * 0x800 + 0x614100)
+#define NV50_PDISPLAY_CRTC_CLK_CTRL1_CONNECTED                       0x00000600
+#define NV50_PDISPLAY_CRTC_CLK_VPLL_A(i)                ((i) * 0x800 + 0x614104)
+#define NV50_PDISPLAY_CRTC_CLK_VPLL_B(i)                ((i) * 0x800 + 0x614108)
+#define NV50_PDISPLAY_CRTC_CLK_CTRL2(i)                 ((i) * 0x800 + 0x614200)
+
+#define NV50_PDISPLAY_DAC_CLK                                        0x00614000
+#define NV50_PDISPLAY_DAC_CLK_CTRL2(i)                  ((i) * 0x800 + 0x614280)
+
+#define NV50_PDISPLAY_SOR_CLK                                        0x00614000
+#define NV50_PDISPLAY_SOR_CLK_CTRL2(i)                  ((i) * 0x800 + 0x614300)
+
+#define NV50_PDISPLAY_VGACRTC(r)                                ((r) + 0x619400)
+
+#define NV50_PDISPLAY_DAC                                            0x0061a000
+#define NV50_PDISPLAY_DAC_DPMS_CTRL(i)                (0x0061a004 + (i) * 0x800)
+#define NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF                        0x00000001
+#define NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF                        0x00000004
+#define NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED                          0x00000010
+#define NV50_PDISPLAY_DAC_DPMS_CTRL_OFF                              0x00000040
+#define NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING                          0x80000000
+#define NV50_PDISPLAY_DAC_LOAD_CTRL(i)                (0x0061a00c + (i) * 0x800)
+#define NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE                           0x00100000
+#define NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT                          0x38000000
+#define NV50_PDISPLAY_DAC_LOAD_CTRL_DONE                             0x80000000
+#define NV50_PDISPLAY_DAC_CLK_CTRL1(i)                (0x0061a010 + (i) * 0x800)
+#define NV50_PDISPLAY_DAC_CLK_CTRL1_CONNECTED                        0x00000600
+
+#define NV50_PDISPLAY_SOR                                            0x0061c000
+#define NV50_PDISPLAY_SOR_DPMS_CTRL(i)                (0x0061c004 + (i) * 0x800)
+#define NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING                          0x80000000
+#define NV50_PDISPLAY_SOR_DPMS_CTRL_ON                               0x00000001
+#define NV50_PDISPLAY_SOR_CLK_CTRL1(i)                (0x0061c008 + (i) * 0x800)
+#define NV50_PDISPLAY_SOR_CLK_CTRL1_CONNECTED                        0x00000600
+#define NV50_PDISPLAY_SOR_DPMS_STATE(i)               (0x0061c030 + (i) * 0x800)
+#define NV50_PDISPLAY_SOR_DPMS_STATE_ACTIVE                          0x00030000
+#define NV50_PDISPLAY_SOR_DPMS_STATE_BLANKED                         0x00080000
+#define NV50_PDISPLAY_SOR_DPMS_STATE_WAIT                            0x10000000
+#define NV50_PDISPLAY_SOR_BACKLIGHT                                  0x0061c084
+#define NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE                           0x80000000
+#define NV50_PDISPLAY_SOR_BACKLIGHT_LEVEL                            0x00000fff
+#define NV50_SOR_DP_CTRL(i,l)            (0x0061c10c + (i) * 0x800 + (l) * 0x80)
+#define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED                      0x00004000
+#define NV50_SOR_DP_CTRL_LANE_MASK                                   0x001f0000
+#define NV50_SOR_DP_CTRL_LANE_0_ENABLED                              0x00010000
+#define NV50_SOR_DP_CTRL_LANE_1_ENABLED                              0x00020000
+#define NV50_SOR_DP_CTRL_LANE_2_ENABLED                              0x00040000
+#define NV50_SOR_DP_CTRL_LANE_3_ENABLED                              0x00080000
+#define NV50_SOR_DP_CTRL_TRAINING_PATTERN                            0x0f000000
+#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_DISABLED                   0x00000000
+#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_1                          0x01000000
+#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2                          0x02000000
+#define NV50_SOR_DP_UNK118(i,l)          (0x0061c118 + (i) * 0x800 + (l) * 0x80)
+#define NV50_SOR_DP_UNK120(i,l)          (0x0061c120 + (i) * 0x800 + (l) * 0x80)
+#define NV50_SOR_DP_UNK130(i,l)          (0x0061c130 + (i) * 0x800 + (l) * 0x80)
+
+#define NV50_PDISPLAY_USER(i)                        ((i) * 0x1000 + 0x00640000)
+#define NV50_PDISPLAY_USER_PUT(i)                    ((i) * 0x1000 + 0x00640000)
+#define NV50_PDISPLAY_USER_GET(i)                    ((i) * 0x1000 + 0x00640004)
+
+#define NV50_PDISPLAY_CURSOR_USER                                    0x00647000
+#define NV50_PDISPLAY_CURSOR_USER_POS_CTRL(i)        ((i) * 0x1000 + 0x00647080)
+#define NV50_PDISPLAY_CURSOR_USER_POS(i)             ((i) * 0x1000 + 0x00647084)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
new file mode 100644
index 0000000..4c7f1e4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -0,0 +1,321 @@
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include <linux/pagemap.h>
+
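+/* The GART ctxdma always uses 4KiB pages on the GPU side, even when the
+ * CPU PAGE_SIZE is larger; bind/unbind write one PTE per 4KiB sub-page. */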
+#define NV_CTXDMA_PAGE_SHIFT 12
+#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
+#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)
+
+struct nouveau_sgdma_be {
+	struct ttm_backend backend;
+	struct drm_device *dev;
+
+	dma_addr_t *pages;
+	unsigned nr_pages;
+
+	unsigned pte_start;
+	bool bound;
+};
+
+static int
+nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
+		       struct page **pages, struct page *dummy_read_page)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct drm_device *dev = nvbe->dev;
+
+	NV_DEBUG(nvbe->dev, "num_pages = %lu\n", num_pages);
+
+	if (nvbe->pages)
+		return -EINVAL;
+
+	nvbe->pages = kcalloc(num_pages, sizeof(dma_addr_t), GFP_KERNEL);
+	if (!nvbe->pages)
+		return -ENOMEM;
+
+	nvbe->nr_pages = 0;
+	while (num_pages--) {
+		nvbe->pages[nvbe->nr_pages] =
+			pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
+				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(dev->pdev,
+					  nvbe->pages[nvbe->nr_pages])) {
+			be->func->clear(be);
+			return -EFAULT;
+		}
+
+		nvbe->nr_pages++;
+	}
+
+	return 0;
+}
+
+static void
+nouveau_sgdma_clear(struct ttm_backend *be)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct drm_device *dev = nvbe->dev;
+
+	NV_DEBUG(nvbe->dev, "\n");
+
+	if (nvbe && nvbe->pages) {
+		if (nvbe->bound)
+			be->func->unbind(be);
+
+		while (nvbe->nr_pages--) {
+			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
+				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		}
+		kfree(nvbe->pages);
+		nvbe->pages = NULL;
+		nvbe->nr_pages = 0;
+	}
+}
+
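+/* Return the index of the first PTE covering 'offset'.  Pre-NV50 the
+ * ctxdma object begins with a two-word DMA object header, so PTEs start
+ * at entry 2; on NV50 each page takes two 32-bit words, hence the shift. */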
+static inline unsigned
+nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
+
+	if (dev_priv->card_type < NV_50)
+		return pte + 2;
+
+	return pte << 1;
+}
+
+static int
+nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct drm_device *dev = nvbe->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
+	unsigned i, j, pte;
+
+	NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);
+
+	dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
+	pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
+	nvbe->pte_start = pte;
+	for (i = 0; i < nvbe->nr_pages; i++) {
+		dma_addr_t dma_offset = nvbe->pages[i];
+		uint32_t offset_l = lower_32_bits(dma_offset);
+		uint32_t offset_h = upper_32_bits(dma_offset);
+
+		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
+			if (dev_priv->card_type < NV_50)
+				nv_wo32(dev, gpuobj, pte++, offset_l | 3);
+			else {
+				nv_wo32(dev, gpuobj, pte++, offset_l | 0x21);
+				nv_wo32(dev, gpuobj, pte++, offset_h & 0xff);
+			}
+
+			dma_offset += NV_CTXDMA_PAGE_SIZE;
+		}
+	}
+	dev_priv->engine.instmem.finish_access(nvbe->dev);
+
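+	/* The 0x100c80 pokes below are presumably a VM/TLB flush for the
+	 * updated page table; the register itself is undocumented. */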
+	if (dev_priv->card_type == NV_50) {
+		nv_wr32(dev, 0x100c80, 0x00050001);
+		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
+						nv_rd32(dev, 0x100c80));
+			return -EBUSY;
+		}
+
+		nv_wr32(dev, 0x100c80, 0x00000001);
+		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
+						nv_rd32(dev, 0x100c80));
+			return -EBUSY;
+		}
+	}
+
+	nvbe->bound = true;
+	return 0;
+}
+
+static int
+nouveau_sgdma_unbind(struct ttm_backend *be)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct drm_device *dev = nvbe->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
+	unsigned i, j, pte;
+
+	NV_DEBUG(dev, "\n");
+
+	if (!nvbe->bound)
+		return 0;
+
+	dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
+	pte = nvbe->pte_start;
+	for (i = 0; i < nvbe->nr_pages; i++) {
+		dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;
+
+		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
+			if (dev_priv->card_type < NV_50)
+				nv_wo32(dev, gpuobj, pte++, dma_offset | 3);
+			else {
+				nv_wo32(dev, gpuobj, pte++, dma_offset | 0x21);
+				nv_wo32(dev, gpuobj, pte++, 0x00000000);
+			}
+
+			dma_offset += NV_CTXDMA_PAGE_SIZE;
+		}
+	}
+	dev_priv->engine.instmem.finish_access(nvbe->dev);
+
+	nvbe->bound = false;
+	return 0;
+}
+
+static void
+nouveau_sgdma_destroy(struct ttm_backend *be)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+
+	if (!nvbe)
+		return;
+
+	NV_DEBUG(nvbe->dev, "\n");
+
+	if (nvbe->pages)
+		be->func->clear(be);
+	kfree(nvbe);
+}
+
+static struct ttm_backend_func nouveau_sgdma_backend = {
+	.populate		= nouveau_sgdma_populate,
+	.clear			= nouveau_sgdma_clear,
+	.bind			= nouveau_sgdma_bind,
+	.unbind			= nouveau_sgdma_unbind,
+	.destroy		= nouveau_sgdma_destroy
+};
+
+struct ttm_backend *
+nouveau_sgdma_init_ttm(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_sgdma_be *nvbe;
+
+	if (!dev_priv->gart_info.sg_ctxdma)
+		return NULL;
+
+	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
+	if (!nvbe)
+		return NULL;
+
+	nvbe->dev = dev;
+
+	nvbe->backend.func	= &nouveau_sgdma_backend;
+
+	return &nvbe->backend;
+}
+
+int
+nouveau_sgdma_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *gpuobj = NULL;
+	uint32_t aper_size, obj_size;
+	int i, ret;
+
+	if (dev_priv->card_type < NV_50) {
+		aper_size = (64 * 1024 * 1024);
+		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
+		obj_size += 8; /* ctxdma header */
+	} else {
+		/* 1 entire VM page table */
+		aper_size = (512 * 1024 * 1024);
+		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
+	}
+
+	ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
+				      NVOBJ_FLAG_ALLOW_NO_REFS |
+				      NVOBJ_FLAG_ZERO_ALLOC |
+				      NVOBJ_FLAG_ZERO_FREE, &gpuobj);
+	if (ret) {
+		NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
+		return ret;
+	}
+
+	dev_priv->gart_info.sg_dummy_page =
+		alloc_page(GFP_KERNEL|__GFP_DMA32);
+	if (!dev_priv->gart_info.sg_dummy_page) {
+		nouveau_gpuobj_del(dev, &gpuobj);
+		return -ENOMEM;
+	}
+	set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
+	dev_priv->gart_info.sg_dummy_bus =
+		pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
+			     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+
+	dev_priv->engine.instmem.prepare_access(dev, true);
+	if (dev_priv->card_type < NV_50) {
+		/* Maybe use NV_DMA_TARGET_AGP for PCIE?  NVIDIA does this,
+		 * and it's confirmed to work on C51.  Perhaps it means
+		 * NV_DMA_TARGET_PCIE on those cards? */
+		nv_wo32(dev, gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
+				       (1 << 12) /* PT present */ |
+				       (0 << 13) /* PT *not* linear */ |
+				       (NV_DMA_ACCESS_RW  << 14) |
+				       (NV_DMA_TARGET_PCI << 16));
+		nv_wo32(dev, gpuobj, 1, aper_size - 1);
+		for (i = 2; i < 2 + (aper_size >> 12); i++) {
+			nv_wo32(dev, gpuobj, i,
+				    dev_priv->gart_info.sg_dummy_bus | 3);
+		}
+	} else {
+		for (i = 0; i < obj_size; i += 8) {
+			nv_wo32(dev, gpuobj, (i+0)/4,
+				    dev_priv->gart_info.sg_dummy_bus | 0x21);
+			nv_wo32(dev, gpuobj, (i+4)/4, 0);
+		}
+	}
+	dev_priv->engine.instmem.finish_access(dev);
+
+	dev_priv->gart_info.type      = NOUVEAU_GART_SGDMA;
+	dev_priv->gart_info.aper_base = 0;
+	dev_priv->gart_info.aper_size = aper_size;
+	dev_priv->gart_info.sg_ctxdma = gpuobj;
+	return 0;
+}
+
+void
+nouveau_sgdma_takedown(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->gart_info.sg_dummy_page) {
+		pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
+			       NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		unlock_page(dev_priv->gart_info.sg_dummy_page);
+		__free_page(dev_priv->gart_info.sg_dummy_page);
+		dev_priv->gart_info.sg_dummy_page = NULL;
+		dev_priv->gart_info.sg_dummy_bus = 0;
+	}
+
+	nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
+}
+
+int
+nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
+	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
+	int pte;
+
+	pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
+	if (dev_priv->card_type < NV_50) {
+		instmem->prepare_access(dev, false);
+		*page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
+		instmem->finish_access(dev);
+		return 0;
+	}
+
+	NV_ERROR(dev, "Unimplemented on NV50\n");
+	return -EINVAL;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
new file mode 100644
index 0000000..2ed41d3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -0,0 +1,811 @@
+/*
+ * Copyright 2005 Stephane Marchesin
+ * Copyright 2008 Stuart Bennett
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/swab.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_sarea.h"
+#include "drm_crtc_helper.h"
+#include <linux/vgaarb.h>
+
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nv50_display.h"
+
+static int nouveau_stub_init(struct drm_device *dev) { return 0; }
+static void nouveau_stub_takedown(struct drm_device *dev) {}
+
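+/* Select the per-generation engine hooks based on the top nibble of the
+ * chipset ID; related generations (e.g. 0x40/0x60, 0x50/0x80/0x90/0xA0)
+ * are assumed to share an implementation. */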
+static int nouveau_init_engine_ptrs(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_engine *engine = &dev_priv->engine;
+
+	switch (dev_priv->chipset & 0xf0) {
+	case 0x00:
+		engine->instmem.init		= nv04_instmem_init;
+		engine->instmem.takedown	= nv04_instmem_takedown;
+		engine->instmem.suspend		= nv04_instmem_suspend;
+		engine->instmem.resume		= nv04_instmem_resume;
+		engine->instmem.populate	= nv04_instmem_populate;
+		engine->instmem.clear		= nv04_instmem_clear;
+		engine->instmem.bind		= nv04_instmem_bind;
+		engine->instmem.unbind		= nv04_instmem_unbind;
+		engine->instmem.prepare_access	= nv04_instmem_prepare_access;
+		engine->instmem.finish_access	= nv04_instmem_finish_access;
+		engine->mc.init			= nv04_mc_init;
+		engine->mc.takedown		= nv04_mc_takedown;
+		engine->timer.init		= nv04_timer_init;
+		engine->timer.read		= nv04_timer_read;
+		engine->timer.takedown		= nv04_timer_takedown;
+		engine->fb.init			= nv04_fb_init;
+		engine->fb.takedown		= nv04_fb_takedown;
+		engine->graph.grclass		= nv04_graph_grclass;
+		engine->graph.init		= nv04_graph_init;
+		engine->graph.takedown		= nv04_graph_takedown;
+		engine->graph.fifo_access	= nv04_graph_fifo_access;
+		engine->graph.channel		= nv04_graph_channel;
+		engine->graph.create_context	= nv04_graph_create_context;
+		engine->graph.destroy_context	= nv04_graph_destroy_context;
+		engine->graph.load_context	= nv04_graph_load_context;
+		engine->graph.unload_context	= nv04_graph_unload_context;
+		engine->fifo.channels		= 16;
+		engine->fifo.init		= nv04_fifo_init;
+		engine->fifo.takedown		= nouveau_stub_takedown;
+		engine->fifo.disable		= nv04_fifo_disable;
+		engine->fifo.enable		= nv04_fifo_enable;
+		engine->fifo.reassign		= nv04_fifo_reassign;
+		engine->fifo.channel_id		= nv04_fifo_channel_id;
+		engine->fifo.create_context	= nv04_fifo_create_context;
+		engine->fifo.destroy_context	= nv04_fifo_destroy_context;
+		engine->fifo.load_context	= nv04_fifo_load_context;
+		engine->fifo.unload_context	= nv04_fifo_unload_context;
+		break;
+	case 0x10:
+		engine->instmem.init		= nv04_instmem_init;
+		engine->instmem.takedown	= nv04_instmem_takedown;
+		engine->instmem.suspend		= nv04_instmem_suspend;
+		engine->instmem.resume		= nv04_instmem_resume;
+		engine->instmem.populate	= nv04_instmem_populate;
+		engine->instmem.clear		= nv04_instmem_clear;
+		engine->instmem.bind		= nv04_instmem_bind;
+		engine->instmem.unbind		= nv04_instmem_unbind;
+		engine->instmem.prepare_access	= nv04_instmem_prepare_access;
+		engine->instmem.finish_access	= nv04_instmem_finish_access;
+		engine->mc.init			= nv04_mc_init;
+		engine->mc.takedown		= nv04_mc_takedown;
+		engine->timer.init		= nv04_timer_init;
+		engine->timer.read		= nv04_timer_read;
+		engine->timer.takedown		= nv04_timer_takedown;
+		engine->fb.init			= nv10_fb_init;
+		engine->fb.takedown		= nv10_fb_takedown;
+		engine->graph.grclass		= nv10_graph_grclass;
+		engine->graph.init		= nv10_graph_init;
+		engine->graph.takedown		= nv10_graph_takedown;
+		engine->graph.channel		= nv10_graph_channel;
+		engine->graph.create_context	= nv10_graph_create_context;
+		engine->graph.destroy_context	= nv10_graph_destroy_context;
+		engine->graph.fifo_access	= nv04_graph_fifo_access;
+		engine->graph.load_context	= nv10_graph_load_context;
+		engine->graph.unload_context	= nv10_graph_unload_context;
+		engine->fifo.channels		= 32;
+		engine->fifo.init		= nv10_fifo_init;
+		engine->fifo.takedown		= nouveau_stub_takedown;
+		engine->fifo.disable		= nv04_fifo_disable;
+		engine->fifo.enable		= nv04_fifo_enable;
+		engine->fifo.reassign		= nv04_fifo_reassign;
+		engine->fifo.channel_id		= nv10_fifo_channel_id;
+		engine->fifo.create_context	= nv10_fifo_create_context;
+		engine->fifo.destroy_context	= nv10_fifo_destroy_context;
+		engine->fifo.load_context	= nv10_fifo_load_context;
+		engine->fifo.unload_context	= nv10_fifo_unload_context;
+		break;
+	case 0x20:
+		engine->instmem.init		= nv04_instmem_init;
+		engine->instmem.takedown	= nv04_instmem_takedown;
+		engine->instmem.suspend		= nv04_instmem_suspend;
+		engine->instmem.resume		= nv04_instmem_resume;
+		engine->instmem.populate	= nv04_instmem_populate;
+		engine->instmem.clear		= nv04_instmem_clear;
+		engine->instmem.bind		= nv04_instmem_bind;
+		engine->instmem.unbind		= nv04_instmem_unbind;
+		engine->instmem.prepare_access	= nv04_instmem_prepare_access;
+		engine->instmem.finish_access	= nv04_instmem_finish_access;
+		engine->mc.init			= nv04_mc_init;
+		engine->mc.takedown		= nv04_mc_takedown;
+		engine->timer.init		= nv04_timer_init;
+		engine->timer.read		= nv04_timer_read;
+		engine->timer.takedown		= nv04_timer_takedown;
+		engine->fb.init			= nv10_fb_init;
+		engine->fb.takedown		= nv10_fb_takedown;
+		engine->graph.grclass		= nv20_graph_grclass;
+		engine->graph.init		= nv20_graph_init;
+		engine->graph.takedown		= nv20_graph_takedown;
+		engine->graph.channel		= nv10_graph_channel;
+		engine->graph.create_context	= nv20_graph_create_context;
+		engine->graph.destroy_context	= nv20_graph_destroy_context;
+		engine->graph.fifo_access	= nv04_graph_fifo_access;
+		engine->graph.load_context	= nv20_graph_load_context;
+		engine->graph.unload_context	= nv20_graph_unload_context;
+		engine->fifo.channels		= 32;
+		engine->fifo.init		= nv10_fifo_init;
+		engine->fifo.takedown		= nouveau_stub_takedown;
+		engine->fifo.disable		= nv04_fifo_disable;
+		engine->fifo.enable		= nv04_fifo_enable;
+		engine->fifo.reassign		= nv04_fifo_reassign;
+		engine->fifo.channel_id		= nv10_fifo_channel_id;
+		engine->fifo.create_context	= nv10_fifo_create_context;
+		engine->fifo.destroy_context	= nv10_fifo_destroy_context;
+		engine->fifo.load_context	= nv10_fifo_load_context;
+		engine->fifo.unload_context	= nv10_fifo_unload_context;
+		break;
+	case 0x30:
+		engine->instmem.init		= nv04_instmem_init;
+		engine->instmem.takedown	= nv04_instmem_takedown;
+		engine->instmem.suspend		= nv04_instmem_suspend;
+		engine->instmem.resume		= nv04_instmem_resume;
+		engine->instmem.populate	= nv04_instmem_populate;
+		engine->instmem.clear		= nv04_instmem_clear;
+		engine->instmem.bind		= nv04_instmem_bind;
+		engine->instmem.unbind		= nv04_instmem_unbind;
+		engine->instmem.prepare_access	= nv04_instmem_prepare_access;
+		engine->instmem.finish_access	= nv04_instmem_finish_access;
+		engine->mc.init			= nv04_mc_init;
+		engine->mc.takedown		= nv04_mc_takedown;
+		engine->timer.init		= nv04_timer_init;
+		engine->timer.read		= nv04_timer_read;
+		engine->timer.takedown		= nv04_timer_takedown;
+		engine->fb.init			= nv10_fb_init;
+		engine->fb.takedown		= nv10_fb_takedown;
+		engine->graph.grclass		= nv30_graph_grclass;
+		engine->graph.init		= nv30_graph_init;
+		engine->graph.takedown		= nv20_graph_takedown;
+		engine->graph.fifo_access	= nv04_graph_fifo_access;
+		engine->graph.channel		= nv10_graph_channel;
+		engine->graph.create_context	= nv20_graph_create_context;
+		engine->graph.destroy_context	= nv20_graph_destroy_context;
+		engine->graph.load_context	= nv20_graph_load_context;
+		engine->graph.unload_context	= nv20_graph_unload_context;
+		engine->fifo.channels		= 32;
+		engine->fifo.init		= nv10_fifo_init;
+		engine->fifo.takedown		= nouveau_stub_takedown;
+		engine->fifo.disable		= nv04_fifo_disable;
+		engine->fifo.enable		= nv04_fifo_enable;
+		engine->fifo.reassign		= nv04_fifo_reassign;
+		engine->fifo.channel_id		= nv10_fifo_channel_id;
+		engine->fifo.create_context	= nv10_fifo_create_context;
+		engine->fifo.destroy_context	= nv10_fifo_destroy_context;
+		engine->fifo.load_context	= nv10_fifo_load_context;
+		engine->fifo.unload_context	= nv10_fifo_unload_context;
+		break;
+	case 0x40:
+	case 0x60:
+		engine->instmem.init		= nv04_instmem_init;
+		engine->instmem.takedown	= nv04_instmem_takedown;
+		engine->instmem.suspend		= nv04_instmem_suspend;
+		engine->instmem.resume		= nv04_instmem_resume;
+		engine->instmem.populate	= nv04_instmem_populate;
+		engine->instmem.clear		= nv04_instmem_clear;
+		engine->instmem.bind		= nv04_instmem_bind;
+		engine->instmem.unbind		= nv04_instmem_unbind;
+		engine->instmem.prepare_access	= nv04_instmem_prepare_access;
+		engine->instmem.finish_access	= nv04_instmem_finish_access;
+		engine->mc.init			= nv40_mc_init;
+		engine->mc.takedown		= nv40_mc_takedown;
+		engine->timer.init		= nv04_timer_init;
+		engine->timer.read		= nv04_timer_read;
+		engine->timer.takedown		= nv04_timer_takedown;
+		engine->fb.init			= nv40_fb_init;
+		engine->fb.takedown		= nv40_fb_takedown;
+		engine->graph.grclass		= nv40_graph_grclass;
+		engine->graph.init		= nv40_graph_init;
+		engine->graph.takedown		= nv40_graph_takedown;
+		engine->graph.fifo_access	= nv04_graph_fifo_access;
+		engine->graph.channel		= nv40_graph_channel;
+		engine->graph.create_context	= nv40_graph_create_context;
+		engine->graph.destroy_context	= nv40_graph_destroy_context;
+		engine->graph.load_context	= nv40_graph_load_context;
+		engine->graph.unload_context	= nv40_graph_unload_context;
+		engine->fifo.channels		= 32;
+		engine->fifo.init		= nv40_fifo_init;
+		engine->fifo.takedown		= nouveau_stub_takedown;
+		engine->fifo.disable		= nv04_fifo_disable;
+		engine->fifo.enable		= nv04_fifo_enable;
+		engine->fifo.reassign		= nv04_fifo_reassign;
+		engine->fifo.channel_id		= nv10_fifo_channel_id;
+		engine->fifo.create_context	= nv40_fifo_create_context;
+		engine->fifo.destroy_context	= nv40_fifo_destroy_context;
+		engine->fifo.load_context	= nv40_fifo_load_context;
+		engine->fifo.unload_context	= nv40_fifo_unload_context;
+		break;
+	case 0x50:
+	case 0x80: /* gotta love NVIDIA's consistency.. */
+	case 0x90:
+	case 0xA0:
+		engine->instmem.init		= nv50_instmem_init;
+		engine->instmem.takedown	= nv50_instmem_takedown;
+		engine->instmem.suspend		= nv50_instmem_suspend;
+		engine->instmem.resume		= nv50_instmem_resume;
+		engine->instmem.populate	= nv50_instmem_populate;
+		engine->instmem.clear		= nv50_instmem_clear;
+		engine->instmem.bind		= nv50_instmem_bind;
+		engine->instmem.unbind		= nv50_instmem_unbind;
+		engine->instmem.prepare_access	= nv50_instmem_prepare_access;
+		engine->instmem.finish_access	= nv50_instmem_finish_access;
+		engine->mc.init			= nv50_mc_init;
+		engine->mc.takedown		= nv50_mc_takedown;
+		engine->timer.init		= nv04_timer_init;
+		engine->timer.read		= nv04_timer_read;
+		engine->timer.takedown		= nv04_timer_takedown;
+		engine->fb.init			= nouveau_stub_init;
+		engine->fb.takedown		= nouveau_stub_takedown;
+		engine->graph.grclass		= nv50_graph_grclass;
+		engine->graph.init		= nv50_graph_init;
+		engine->graph.takedown		= nv50_graph_takedown;
+		engine->graph.fifo_access	= nv50_graph_fifo_access;
+		engine->graph.channel		= nv50_graph_channel;
+		engine->graph.create_context	= nv50_graph_create_context;
+		engine->graph.destroy_context	= nv50_graph_destroy_context;
+		engine->graph.load_context	= nv50_graph_load_context;
+		engine->graph.unload_context	= nv50_graph_unload_context;
+		engine->fifo.channels		= 128;
+		engine->fifo.init		= nv50_fifo_init;
+		engine->fifo.takedown		= nv50_fifo_takedown;
+		engine->fifo.disable		= nv04_fifo_disable;
+		engine->fifo.enable		= nv04_fifo_enable;
+		engine->fifo.reassign		= nv04_fifo_reassign;
+		engine->fifo.channel_id		= nv50_fifo_channel_id;
+		engine->fifo.create_context	= nv50_fifo_create_context;
+		engine->fifo.destroy_context	= nv50_fifo_destroy_context;
+		engine->fifo.load_context	= nv50_fifo_load_context;
+		engine->fifo.unload_context	= nv50_fifo_unload_context;
+		break;
+	default:
+		NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
+		return 1;
+	}
+
+	return 0;
+}
+
+static unsigned int
+nouveau_vga_set_decode(void *priv, bool state)
+{
+	if (state)
+		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+	else
+		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
+int
+nouveau_card_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_engine *engine;
+	struct nouveau_gpuobj *gpuobj;
+	int ret;
+
+	NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
+
+	if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE)
+		return 0;
+
+	vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
+
+	/* Initialise internal driver API hooks */
+	ret = nouveau_init_engine_ptrs(dev);
+	if (ret)
+		return ret;
+	engine = &dev_priv->engine;
+	dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
+
+	/* Parse BIOS tables / Run init tables if card not POSTed */
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		ret = nouveau_bios_init(dev);
+		if (ret)
+			return ret;
+	}
+
+	ret = nouveau_gpuobj_early_init(dev);
+	if (ret)
+		return ret;
+
+	/* Initialise instance memory; this must happen before mem_init so we
+	 * know exactly how much VRAM we're able to use for "normal"
+	 * purposes.
+	 */
+	ret = engine->instmem.init(dev);
+	if (ret)
+		return ret;
+
+	/* Setup the memory manager */
+	ret = nouveau_mem_init(dev);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_init(dev);
+	if (ret)
+		return ret;
+
+	/* PMC */
+	ret = engine->mc.init(dev);
+	if (ret)
+		return ret;
+
+	/* PTIMER */
+	ret = engine->timer.init(dev);
+	if (ret)
+		return ret;
+
+	/* PFB */
+	ret = engine->fb.init(dev);
+	if (ret)
+		return ret;
+
+	/* PGRAPH */
+	ret = engine->graph.init(dev);
+	if (ret)
+		return ret;
+
+	/* PFIFO */
+	ret = engine->fifo.init(dev);
+	if (ret)
+		return ret;
+
+	/* this calls irq_preinstall, registers the irq handler and
+	 * calls irq_postinstall
+	 */
+	ret = drm_irq_install(dev);
+	if (ret)
+		return ret;
+
+	ret = drm_vblank_init(dev, 0);
+	if (ret)
+		return ret;
+
+	/* what about PVIDEO/PCRTC/PRAMDAC etc? */
+
+	ret = nouveau_channel_alloc(dev, &dev_priv->channel,
+				    (struct drm_file *)-2,
+				    NvDmaFB, NvDmaTT);
+	if (ret)
+		return ret;
+
+	gpuobj = NULL;
+	ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
+				     0, nouveau_mem_fb_amount(dev),
+				     NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
+				     &gpuobj);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM,
+				     gpuobj, NULL);
+	if (ret) {
+		nouveau_gpuobj_del(dev, &gpuobj);
+		return ret;
+	}
+
+	gpuobj = NULL;
+	ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
+					  dev_priv->gart_info.aper_size,
+					  NV_DMA_ACCESS_RW, &gpuobj, NULL);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART,
+				     gpuobj, NULL);
+	if (ret) {
+		nouveau_gpuobj_del(dev, &gpuobj);
+		return ret;
+	}
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		if (dev_priv->card_type >= NV_50) {
+			ret = nv50_display_create(dev);
+			if (ret)
+				return ret;
+		} else {
+			ret = nv04_display_create(dev);
+			if (ret)
+				return ret;
+		}
+	}
+
+	ret = nouveau_backlight_init(dev);
+	if (ret)
+		NV_ERROR(dev, "Error %d registering backlight\n", ret);
+
+	dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_helper_initial_config(dev);
+
+	return 0;
+}
+
+static void nouveau_card_takedown(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_engine *engine = &dev_priv->engine;
+
+	NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
+
+	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
+		nouveau_backlight_exit(dev);
+
+		if (dev_priv->channel) {
+			nouveau_channel_free(dev_priv->channel);
+			dev_priv->channel = NULL;
+		}
+
+		engine->fifo.takedown(dev);
+		engine->graph.takedown(dev);
+		engine->fb.takedown(dev);
+		engine->timer.takedown(dev);
+		engine->mc.takedown(dev);
+
+		mutex_lock(&dev->struct_mutex);
+		ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
+		mutex_unlock(&dev->struct_mutex);
+		nouveau_sgdma_takedown(dev);
+
+		nouveau_gpuobj_takedown(dev);
+		nouveau_mem_close(dev);
+		engine->instmem.takedown(dev);
+
+		if (drm_core_check_feature(dev, DRIVER_MODESET))
+			drm_irq_uninstall(dev);
+
+		nouveau_gpuobj_late_takedown(dev);
+		nouveau_bios_takedown(dev);
+
+		vga_client_register(dev->pdev, NULL, NULL, NULL);
+
+		dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
+	}
+}
+
+/* here a client dies; release whatever was allocated for its
+ * file_priv */
+void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+	nouveau_channel_cleanup(dev, file_priv);
+}
+
+/* first module load; set up the mmio/fb mapping */
+/* KMS: we need mmio at load time, not when the first drm client opens. */
+int nouveau_firstopen(struct drm_device *dev)
+{
+	return 0;
+}
+
+/* if we have an OF card, copy vbios to RAMIN */
+static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev)
+{
+#if defined(__powerpc__)
+	int size, i;
+	const uint32_t *bios;
+	struct device_node *dn = pci_device_to_OF_node(dev->pdev);
+	if (!dn) {
+		NV_INFO(dev, "Unable to get the OF node\n");
+		return;
+	}
+
+	bios = of_get_property(dn, "NVDA,BMP", &size);
+	if (bios) {
+		for (i = 0; i < size; i += 4)
+			nv_wi32(dev, i, bios[i/4]);
+		NV_INFO(dev, "OF bios successfully copied (%d bytes)\n", size);
+	} else {
+		NV_INFO(dev, "Unable to get the OF bios\n");
+	}
+#endif
+}
+
+int nouveau_load(struct drm_device *dev, unsigned long flags)
+{
+	struct drm_nouveau_private *dev_priv;
+	uint32_t reg0;
+	resource_size_t mmio_start_offs;
+
+	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+	if (!dev_priv)
+		return -ENOMEM;
+	dev->dev_private = dev_priv;
+	dev_priv->dev = dev;
+
+	dev_priv->flags = flags & NOUVEAU_FLAGS;
+	dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
+
+	NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
+		 dev->pci_vendor, dev->pci_device, dev->pdev->class);
+
+	dev_priv->acpi_dsm = nouveau_dsm_probe(dev);
+
+	if (dev_priv->acpi_dsm)
+		nouveau_hybrid_setup(dev);
+
+	dev_priv->wq = create_workqueue("nouveau");
+	if (!dev_priv->wq)
+		return -EINVAL;
+
+	/* resource 0 is mmio regs */
+	/* resource 1 is linear FB */
+	/* resource 2 is RAMIN (mmio regs + 0x1000000) */
+	/* resource 6 is bios */
+
+	/* map the mmio regs */
+	mmio_start_offs = pci_resource_start(dev->pdev, 0);
+	dev_priv->mmio = ioremap(mmio_start_offs, 0x00800000);
+	if (!dev_priv->mmio) {
+		NV_ERROR(dev, "Unable to initialize the mmio mapping. "
+			 "Please report your setup to " DRIVER_EMAIL "\n");
+		return -EINVAL;
+	}
+	NV_DEBUG(dev, "regs mapped ok at 0x%llx\n",
+					(unsigned long long)mmio_start_offs);
+
+#ifdef __BIG_ENDIAN
+	/* Put the card in BE mode if it's not */
+	if (nv_rd32(dev, NV03_PMC_BOOT_1))
+		nv_wr32(dev, NV03_PMC_BOOT_1, 0x00000001);
+
+	DRM_MEMORYBARRIER();
+#endif
+
+	/* Time to determine the card architecture */
+	reg0 = nv_rd32(dev, NV03_PMC_BOOT_0);
+
+	/* We're dealing with >=NV10 */
+	if ((reg0 & 0x0f000000) > 0) {
+		/* Bits 27-20 contain the architecture in hex */
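+		/* e.g. a hypothetical reg0 of 0x04400000 yields chipset 0x44 */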
+		dev_priv->chipset = (reg0 & 0xff00000) >> 20;
+	/* NV04 or NV05 */
+	} else if ((reg0 & 0xff00fff0) == 0x20004000) {
+		dev_priv->chipset = 0x04;
+	} else
+		dev_priv->chipset = 0xff;
+
+	switch (dev_priv->chipset & 0xf0) {
+	case 0x00:
+	case 0x10:
+	case 0x20:
+	case 0x30:
+		dev_priv->card_type = dev_priv->chipset & 0xf0;
+		break;
+	case 0x40:
+	case 0x60:
+		dev_priv->card_type = NV_40;
+		break;
+	case 0x50:
+	case 0x80:
+	case 0x90:
+	case 0xa0:
+		dev_priv->card_type = NV_50;
+		break;
+	default:
+		NV_INFO(dev, "Unsupported chipset 0x%08x\n", reg0);
+		return -EINVAL;
+	}
+
+	NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
+		dev_priv->card_type, reg0);
+
+	/* map larger RAMIN aperture on NV40 cards */
+	dev_priv->ramin  = NULL;
+	if (dev_priv->card_type >= NV_40) {
+		int ramin_bar = 2;
+		if (pci_resource_len(dev->pdev, ramin_bar) == 0)
+			ramin_bar = 3;
+
+		dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar);
+		dev_priv->ramin = ioremap(
+				pci_resource_start(dev->pdev, ramin_bar),
+				dev_priv->ramin_size);
+		if (!dev_priv->ramin) {
+			NV_ERROR(dev, "Failed to init RAMIN mapping, "
+				      "limited instance memory available\n");
+		}
+	}
+
+	/* On older cards (or if the above failed), create a map covering
+	 * the BAR0 PRAMIN aperture */
+	if (!dev_priv->ramin) {
+		dev_priv->ramin_size = 1 * 1024 * 1024;
+		dev_priv->ramin = ioremap(mmio_start_offs + NV_RAMIN,
+							dev_priv->ramin_size);
+		if (!dev_priv->ramin) {
+			NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n");
+			return -ENOMEM;
+		}
+	}
+
+	nouveau_OF_copy_vbios_to_ramin(dev);
+
+	/* Special flags */
+	if (dev->pci_device == 0x01a0)
+		dev_priv->flags |= NV_NFORCE;
+	else if (dev->pci_device == 0x01f0)
+		dev_priv->flags |= NV_NFORCE2;
+
+	/* For kernel modesetting, init card now and bring up fbcon */
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		int ret = nouveau_card_init(dev);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void nouveau_close(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	/* In the case of an error, dev_priv may not be allocated yet */
+	if (dev_priv && dev_priv->card_type)
+		nouveau_card_takedown(dev);
+}
+
+/* KMS: we need mmio at load time, not when the first drm client opens. */
+void nouveau_lastclose(struct drm_device *dev)
+{
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
+	nouveau_close(dev);
+}
+
+int nouveau_unload(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		if (dev_priv->card_type >= NV_50)
+			nv50_display_destroy(dev);
+		else
+			nv04_display_destroy(dev);
+		nouveau_close(dev);
+	}
+
+	iounmap(dev_priv->mmio);
+	iounmap(dev_priv->ramin);
+
+	kfree(dev_priv);
+	dev->dev_private = NULL;
+	return 0;
+}
+
+int
+nouveau_ioctl_card_init(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	return nouveau_card_init(dev);
+}
+
+int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
+						struct drm_file *file_priv)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_nouveau_getparam *getparam = data;
+
+	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+	switch (getparam->param) {
+	case NOUVEAU_GETPARAM_CHIPSET_ID:
+		getparam->value = dev_priv->chipset;
+		break;
+	case NOUVEAU_GETPARAM_PCI_VENDOR:
+		getparam->value = dev->pci_vendor;
+		break;
+	case NOUVEAU_GETPARAM_PCI_DEVICE:
+		getparam->value = dev->pci_device;
+		break;
+	case NOUVEAU_GETPARAM_BUS_TYPE:
+		if (drm_device_is_agp(dev))
+			getparam->value = NV_AGP;
+		else if (drm_device_is_pcie(dev))
+			getparam->value = NV_PCIE;
+		else
+			getparam->value = NV_PCI;
+		break;
+	case NOUVEAU_GETPARAM_FB_PHYSICAL:
+		getparam->value = dev_priv->fb_phys;
+		break;
+	case NOUVEAU_GETPARAM_AGP_PHYSICAL:
+		getparam->value = dev_priv->gart_info.aper_base;
+		break;
+	case NOUVEAU_GETPARAM_PCI_PHYSICAL:
+		if (dev->sg) {
+			getparam->value = (unsigned long)dev->sg->virtual;
+		} else {
+			NV_ERROR(dev, "Requested PCIGART address, "
+					"while no PCIGART was created\n");
+			return -EINVAL;
+		}
+		break;
+	case NOUVEAU_GETPARAM_FB_SIZE:
+		getparam->value = dev_priv->fb_available_size;
+		break;
+	case NOUVEAU_GETPARAM_AGP_SIZE:
+		getparam->value = dev_priv->gart_info.aper_size;
+		break;
+	case NOUVEAU_GETPARAM_VM_VRAM_BASE:
+		getparam->value = dev_priv->vm_vram_base;
+		break;
+	default:
+		NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
+		return -EINVAL;
+	}
+
+	return 0;
+}
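+
+/* Userspace sketch (illustrative only, not part of this patch; assumes
+ * libdrm's generic command wrappers):
+ *
+ *	struct drm_nouveau_getparam gp = { .param = NOUVEAU_GETPARAM_CHIPSET_ID };
+ *
+ *	if (drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &gp, sizeof(gp)) == 0)
+ *		printf("chipset: 0x%llx\n", (unsigned long long)gp.value);
+ */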
+
+int
+nouveau_ioctl_setparam(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct drm_nouveau_setparam *setparam = data;
+
+	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+	switch (setparam->param) {
+	default:
+		NV_ERROR(dev, "unknown parameter %lld\n", setparam->param);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Wait until (value(reg) & mask) == val, up until timeout has hit */
+bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
+			uint32_t reg, uint32_t mask, uint32_t val)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+	uint64_t start = ptimer->read(dev);
+
+	do {
+		if ((nv_rd32(dev, reg) & mask) == val)
+			return true;
+	} while (ptimer->read(dev) - start < timeout);
+
+	return false;
+}
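+
+/* The timeout is in PTIMER units (nanoseconds); e.g. a hypothetical caller
+ * polling a register for up to 2ms would pass a timeout of 2000000:
+ *
+ *	if (!nouveau_wait_until(dev, 2000000, reg, mask, val))
+ *		return -EBUSY;
+ */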
+
+/* Waits for PGRAPH to go completely idle */
+bool nouveau_wait_for_idle(struct drm_device *dev)
+{
+	if (!nv_wait(NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) {
+		NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n",
+			 nv_rd32(dev, NV04_PGRAPH_STATUS));
+		return false;
+	}
+
+	return true;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
new file mode 100644
index 0000000..187eb84
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
+ * All Rights Reserved.
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+
+static struct vm_operations_struct nouveau_ttm_vm_ops;
+static const struct vm_operations_struct *ttm_vm_ops;
+
+static int
+nouveau_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct ttm_buffer_object *bo = vma->vm_private_data;
+	int ret;
+
+	if (unlikely(bo == NULL))
+		return VM_FAULT_NOPAGE;
+
+	ret = ttm_vm_ops->fault(vma, vmf);
+	return ret;
+}
+
+int
+nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct drm_nouveau_private *dev_priv =
+		file_priv->minor->dev->dev_private;
+	int ret;
+
+	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+		return drm_mmap(filp, vma);
+
+	ret = ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (unlikely(ttm_vm_ops == NULL)) {
+		ttm_vm_ops = vma->vm_ops;
+		nouveau_ttm_vm_ops = *ttm_vm_ops;
+		nouveau_ttm_vm_ops.fault = &nouveau_ttm_fault;
+	}
+
+	vma->vm_ops = &nouveau_ttm_vm_ops;
+	return 0;
+}
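+
+/* Note on the split above: mmap offsets below DRM_FILE_PAGE_OFFSET are
+ * treated as legacy DRM maps and handed to drm_mmap(); anything above is a
+ * TTM buffer object.  On the first TTM mapping we cache TTM's vm_ops and
+ * install a copy whose .fault points at nouveau_ttm_fault(), giving the
+ * driver a hook in the fault path (currently a plain pass-through).
+ */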
+
+static int
+nouveau_ttm_mem_global_init(struct ttm_global_reference *ref)
+{
+	return ttm_mem_global_init(ref->object);
+}
+
+static void
+nouveau_ttm_mem_global_release(struct ttm_global_reference *ref)
+{
+	ttm_mem_global_release(ref->object);
+}
+
+int
+nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
+{
+	struct ttm_global_reference *global_ref;
+	int ret;
+
+	global_ref = &dev_priv->ttm.mem_global_ref;
+	global_ref->global_type = TTM_GLOBAL_TTM_MEM;
+	global_ref->size = sizeof(struct ttm_mem_global);
+	global_ref->init = &nouveau_ttm_mem_global_init;
+	global_ref->release = &nouveau_ttm_mem_global_release;
+
+	ret = ttm_global_item_ref(global_ref);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed setting up TTM memory accounting\n");
+		dev_priv->ttm.mem_global_ref.release = NULL;
+		return ret;
+	}
+
+	dev_priv->ttm.bo_global_ref.mem_glob = global_ref->object;
+	global_ref = &dev_priv->ttm.bo_global_ref.ref;
+	global_ref->global_type = TTM_GLOBAL_TTM_BO;
+	global_ref->size = sizeof(struct ttm_bo_global);
+	global_ref->init = &ttm_bo_global_init;
+	global_ref->release = &ttm_bo_global_release;
+
+	ret = ttm_global_item_ref(global_ref);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed setting up TTM BO subsystem\n");
+		ttm_global_item_unref(&dev_priv->ttm.mem_global_ref);
+		dev_priv->ttm.mem_global_ref.release = NULL;
+		return ret;
+	}
+
+	return 0;
+}
+
+void
+nouveau_ttm_global_release(struct drm_nouveau_private *dev_priv)
+{
+	if (dev_priv->ttm.mem_global_ref.release == NULL)
+		return;
+
+	ttm_global_item_unref(&dev_priv->ttm.bo_global_ref.ref);
+	ttm_global_item_unref(&dev_priv->ttm.mem_global_ref);
+	dev_priv->ttm.mem_global_ref.release = NULL;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
new file mode 100644
index 0000000..b913636
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -0,0 +1,1002 @@
+/*
+ * Copyright 1993-2003 NVIDIA, Corporation
+ * Copyright 2006 Dave Airlie
+ * Copyright 2007 Maarten Maathuis
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_crtc.h"
+#include "nouveau_fb.h"
+#include "nouveau_hw.h"
+#include "nvreg.h"
+
+static int
+nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+			struct drm_framebuffer *old_fb);
+
+static void
+crtc_wr_cio_state(struct drm_crtc *crtc, struct nv04_crtc_reg *crtcstate, int index)
+{
+	NVWriteVgaCrtc(crtc->dev, nouveau_crtc(crtc)->index, index,
+		       crtcstate->CRTC[index]);
+}
+
+static void nv_crtc_set_digital_vibrance(struct drm_crtc *crtc, int level)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+
+	regp->CRTC[NV_CIO_CRE_CSB] = nv_crtc->saturation = level;
+	if (nv_crtc->saturation && nv_gf4_disp_arch(crtc->dev)) {
+		regp->CRTC[NV_CIO_CRE_CSB] = 0x80;
+		regp->CRTC[NV_CIO_CRE_5B] = nv_crtc->saturation << 2;
+		crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_5B);
+	}
+	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_CSB);
+}
+
+static void nv_crtc_set_image_sharpening(struct drm_crtc *crtc, int level)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+
+	nv_crtc->sharpness = level;
+	if (level < 0)	/* blur is in hw range 0x3f -> 0x20 */
+		level += 0x40;
+	regp->ramdac_634 = level;
+	NVWriteRAMDAC(crtc->dev, nv_crtc->index, NV_PRAMDAC_634, regp->ramdac_634);
+}
+
+#define PLLSEL_VPLL1_MASK				\
+	(NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_VPLL	\
+	 | NV_PRAMDAC_PLL_COEFF_SELECT_VCLK_RATIO_DB2)
+#define PLLSEL_VPLL2_MASK				\
+	(NV_PRAMDAC_PLL_COEFF_SELECT_PLL_SOURCE_VPLL2		\
+	 | NV_PRAMDAC_PLL_COEFF_SELECT_VCLK2_RATIO_DB2)
+#define PLLSEL_TV_MASK					\
+	(NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1		\
+	 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1		\
+	 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2	\
+	 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2)
+
+/* NV4x 0x40.. pll notes:
+ * gpu pll: 0x4000 + 0x4004
+ * ?gpu? pll: 0x4008 + 0x400c
+ * vpll1: 0x4010 + 0x4014
+ * vpll2: 0x4018 + 0x401c
+ * mpll: 0x4020 + 0x4024
+ * mpll: 0x4038 + 0x403c
+ *
+ * the first register of each pair has some unknown details:
+ * bits 0-7: redirected values from elsewhere? (similar to PLL_SETUP_CONTROL?)
+ * bits 20-23: (mpll) something to do with post divider?
+ * bits 28-31: related to single stage mode? (bit 8/12)
+ */
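+
+/* Illustrative only (not part of this patch): given the table above, the two
+ * VPLL1 registers on an NV4x could be dumped as
+ *
+ *	uint32_t vpll1_a = nv_rd32(dev, 0x4010);
+ *	uint32_t vpll1_b = nv_rd32(dev, 0x4014);
+ *
+ * the names are ours; this file only programs the PLLs via the helpers below.
+ */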
+
+static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mode * mode, int dot_clock)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nv04_mode_state *state = &dev_priv->mode_reg;
+	struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index];
+	struct nouveau_pll_vals *pv = &regp->pllvals;
+	struct pll_lims pll_lim;
+
+	if (get_pll_limits(dev, nv_crtc->index ? VPLL2 : VPLL1, &pll_lim))
+		return;
+
+	/* NM2 == 0 is used to determine single stage mode on two stage plls */
+	pv->NM2 = 0;
+
+	/* for newer nv4x the blob uses only the first stage of the vpll below a
+	 * certain clock.  for a certain nv4b this is 150MHz.  since the max
+	 * output frequency of the first stage for this card is 300MHz, it is
+	 * assumed the threshold is given by vco1 maxfreq/2
+	 */
+	/* for early nv4x, specifically nv40 and *some* nv43 (devids 0 and 6,
+	 * not 8, others unknown), the blob always uses both plls.  no problem
+	 * has yet been observed in allowing the use of a single stage pll on all
+	 * nv43 however.  the behaviour of single stage use is untested on nv40
+	 */
+	if (dev_priv->chipset > 0x40 && dot_clock <= (pll_lim.vco1.maxfreq / 2))
+		memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2));
+
+	if (!nouveau_calc_pll_mnp(dev, &pll_lim, dot_clock, pv))
+		return;
+
+	state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK;
+
+	/* The blob uses this always, so let's do the same */
+	if (dev_priv->card_type == NV_40)
+		state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE;
+	/* again nv40 and some nv43 act more like nv3x as described above */
+	if (dev_priv->chipset < 0x41)
+		state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL |
+				 NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL;
+	state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK;
+
+	if (pv->NM2)
+		NV_TRACE(dev, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n",
+			 pv->N1, pv->N2, pv->M1, pv->M2, pv->log2P);
+	else
+		NV_TRACE(dev, "vpll: n %d m %d log2p %d\n",
+			 pv->N1, pv->M1, pv->log2P);
+
+	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
+}
+
+static void
+nv_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	unsigned char seq1 = 0, crtc17 = 0;
+	unsigned char crtc1A;
+
+	NV_TRACE(dev, "Setting dpms mode %d on CRTC %d\n", mode,
+							nv_crtc->index);
+
+	if (nv_crtc->last_dpms == mode) /* Don't do unnecessary mode changes. */
+		return;
+
+	nv_crtc->last_dpms = mode;
+
+	if (nv_two_heads(dev))
+		NVSetOwner(dev, nv_crtc->index);
+
+	/* nv4ref indicates these two RPC1 bits inhibit h/v sync */
+	crtc1A = NVReadVgaCrtc(dev, nv_crtc->index,
+					NV_CIO_CRE_RPC1_INDEX) & ~0xC0;
+	switch (mode) {
+	case DRM_MODE_DPMS_STANDBY:
+		/* Screen: Off; HSync: Off, VSync: On -- Not Supported */
+		seq1 = 0x20;
+		crtc17 = 0x80;
+		crtc1A |= 0x80;
+		break;
+	case DRM_MODE_DPMS_SUSPEND:
+		/* Screen: Off; HSync: On, VSync: Off -- Not Supported */
+		seq1 = 0x20;
+		crtc17 = 0x80;
+		crtc1A |= 0x40;
+		break;
+	case DRM_MODE_DPMS_OFF:
+		/* Screen: Off; HSync: Off, VSync: Off */
+		seq1 = 0x20;
+		crtc17 = 0x00;
+		crtc1A |= 0xC0;
+		break;
+	case DRM_MODE_DPMS_ON:
+	default:
+		/* Screen: On; HSync: On, VSync: On */
+		seq1 = 0x00;
+		crtc17 = 0x80;
+		break;
+	}
+
+	NVVgaSeqReset(dev, nv_crtc->index, true);
+	/* Each head has its own sequencer, so we can turn it off when we want */
+	seq1 |= (NVReadVgaSeq(dev, nv_crtc->index, NV_VIO_SR_CLOCK_INDEX) & ~0x20);
+	NVWriteVgaSeq(dev, nv_crtc->index, NV_VIO_SR_CLOCK_INDEX, seq1);
+	crtc17 |= (NVReadVgaCrtc(dev, nv_crtc->index, NV_CIO_CR_MODE_INDEX) & ~0x80);
+	mdelay(10);
+	NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CR_MODE_INDEX, crtc17);
+	NVVgaSeqReset(dev, nv_crtc->index, false);
+
+	NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A);
+}
+
+static bool
+nv_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
+		   struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void
+nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+	struct drm_framebuffer *fb = crtc->fb;
+
+	/* Calculate our timings */
+	int horizDisplay	= (mode->crtc_hdisplay >> 3) 	- 1;
+	int horizStart		= (mode->crtc_hsync_start >> 3) 	- 1;
+	int horizEnd		= (mode->crtc_hsync_end >> 3) 	- 1;
+	int horizTotal		= (mode->crtc_htotal >> 3)		- 5;
+	int horizBlankStart	= (mode->crtc_hdisplay >> 3)		- 1;
+	int horizBlankEnd	= (mode->crtc_htotal >> 3)		- 1;
+	int vertDisplay		= mode->crtc_vdisplay			- 1;
+	int vertStart		= mode->crtc_vsync_start 		- 1;
+	int vertEnd		= mode->crtc_vsync_end			- 1;
+	int vertTotal		= mode->crtc_vtotal 			- 2;
+	int vertBlankStart	= mode->crtc_vdisplay 			- 1;
+	int vertBlankEnd	= mode->crtc_vtotal			- 1;
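+	/* e.g. for a hypothetical 1024x768 mode: horizDisplay =
+	 * (1024 >> 3) - 1 = 127 and vertDisplay = 768 - 1 = 767 */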
+
+	struct drm_encoder *encoder;
+	bool fp_output = false;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+		if (encoder->crtc == crtc &&
+		    (nv_encoder->dcb->type == OUTPUT_LVDS ||
+		     nv_encoder->dcb->type == OUTPUT_TMDS))
+			fp_output = true;
+	}
+
+	if (fp_output) {
+		vertStart = vertTotal - 3;
+		vertEnd = vertTotal - 2;
+		vertBlankStart = vertStart;
+		horizStart = horizTotal - 5;
+		horizEnd = horizTotal - 2;
+		horizBlankEnd = horizTotal + 4;
+#if 0
+		if (dev->overlayAdaptor && dev_priv->card_type >= NV_10)
+			/* This reportedly works around some video overlay bandwidth problems */
+			horizTotal += 2;
+#endif
+	}
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		vertTotal |= 1;
+
+#if 0
+	ErrorF("horizDisplay: 0x%X \n", horizDisplay);
+	ErrorF("horizStart: 0x%X \n", horizStart);
+	ErrorF("horizEnd: 0x%X \n", horizEnd);
+	ErrorF("horizTotal: 0x%X \n", horizTotal);
+	ErrorF("horizBlankStart: 0x%X \n", horizBlankStart);
+	ErrorF("horizBlankEnd: 0x%X \n", horizBlankEnd);
+	ErrorF("vertDisplay: 0x%X \n", vertDisplay);
+	ErrorF("vertStart: 0x%X \n", vertStart);
+	ErrorF("vertEnd: 0x%X \n", vertEnd);
+	ErrorF("vertTotal: 0x%X \n", vertTotal);
+	ErrorF("vertBlankStart: 0x%X \n", vertBlankStart);
+	ErrorF("vertBlankEnd: 0x%X \n", vertBlankEnd);
+#endif
+
+	/*
+	 * compute correct Hsync & Vsync polarity
+	 */
+	if ((mode->flags & (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))
+		&& (mode->flags & (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) {
+
+		regp->MiscOutReg = 0x23;
+		if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+			regp->MiscOutReg |= 0x40;
+		if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+			regp->MiscOutReg |= 0x80;
+	} else {
+		int vdisplay = mode->vdisplay;
+		if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+			vdisplay *= 2;
+		if (mode->vscan > 1)
+			vdisplay *= mode->vscan;
+		if (vdisplay < 400)
+			regp->MiscOutReg = 0xA3;	/* +hsync -vsync */
+		else if (vdisplay < 480)
+			regp->MiscOutReg = 0x63;	/* -hsync +vsync */
+		else if (vdisplay < 768)
+			regp->MiscOutReg = 0xE3;	/* -hsync -vsync */
+		else
+			regp->MiscOutReg = 0x23;	/* +hsync +vsync */
+	}
+
+	regp->MiscOutReg |= (mode->clock_index & 0x03) << 2;
+
+	/*
+	 * Time Sequencer
+	 */
+	regp->Sequencer[NV_VIO_SR_RESET_INDEX] = 0x00;
+	/* 0x20 disables the sequencer */
+	if (mode->flags & DRM_MODE_FLAG_CLKDIV2)
+		regp->Sequencer[NV_VIO_SR_CLOCK_INDEX] = 0x29;
+	else
+		regp->Sequencer[NV_VIO_SR_CLOCK_INDEX] = 0x21;
+	regp->Sequencer[NV_VIO_SR_PLANE_MASK_INDEX] = 0x0F;
+	regp->Sequencer[NV_VIO_SR_CHAR_MAP_INDEX] = 0x00;
+	regp->Sequencer[NV_VIO_SR_MEM_MODE_INDEX] = 0x0E;
+
+	/*
+	 * CRTC
+	 */
+	regp->CRTC[NV_CIO_CR_HDT_INDEX] = horizTotal;
+	regp->CRTC[NV_CIO_CR_HDE_INDEX] = horizDisplay;
+	regp->CRTC[NV_CIO_CR_HBS_INDEX] = horizBlankStart;
+	regp->CRTC[NV_CIO_CR_HBE_INDEX] = (1 << 7) |
+					  XLATE(horizBlankEnd, 0, NV_CIO_CR_HBE_4_0);
+	regp->CRTC[NV_CIO_CR_HRS_INDEX] = horizStart;
+	regp->CRTC[NV_CIO_CR_HRE_INDEX] = XLATE(horizBlankEnd, 5, NV_CIO_CR_HRE_HBE_5) |
+					  XLATE(horizEnd, 0, NV_CIO_CR_HRE_4_0);
+	regp->CRTC[NV_CIO_CR_VDT_INDEX] = vertTotal;
+	regp->CRTC[NV_CIO_CR_OVL_INDEX] = XLATE(vertStart, 9, NV_CIO_CR_OVL_VRS_9) |
+					  XLATE(vertDisplay, 9, NV_CIO_CR_OVL_VDE_9) |
+					  XLATE(vertTotal, 9, NV_CIO_CR_OVL_VDT_9) |
+					  (1 << 4) |
+					  XLATE(vertBlankStart, 8, NV_CIO_CR_OVL_VBS_8) |
+					  XLATE(vertStart, 8, NV_CIO_CR_OVL_VRS_8) |
+					  XLATE(vertDisplay, 8, NV_CIO_CR_OVL_VDE_8) |
+					  XLATE(vertTotal, 8, NV_CIO_CR_OVL_VDT_8);
+	regp->CRTC[NV_CIO_CR_RSAL_INDEX] = 0x00;
+	regp->CRTC[NV_CIO_CR_CELL_HT_INDEX] = ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ? MASK(NV_CIO_CR_CELL_HT_SCANDBL) : 0) |
+					      1 << 6 |
+					      XLATE(vertBlankStart, 9, NV_CIO_CR_CELL_HT_VBS_9);
+	regp->CRTC[NV_CIO_CR_CURS_ST_INDEX] = 0x00;
+	regp->CRTC[NV_CIO_CR_CURS_END_INDEX] = 0x00;
+	regp->CRTC[NV_CIO_CR_SA_HI_INDEX] = 0x00;
+	regp->CRTC[NV_CIO_CR_SA_LO_INDEX] = 0x00;
+	regp->CRTC[NV_CIO_CR_TCOFF_HI_INDEX] = 0x00;
+	regp->CRTC[NV_CIO_CR_TCOFF_LO_INDEX] = 0x00;
+	regp->CRTC[NV_CIO_CR_VRS_INDEX] = vertStart;
+	regp->CRTC[NV_CIO_CR_VRE_INDEX] = 1 << 5 | XLATE(vertEnd, 0, NV_CIO_CR_VRE_3_0);
+	regp->CRTC[NV_CIO_CR_VDE_INDEX] = vertDisplay;
+	/* framebuffer can be larger than crtc scanout area. */
+	regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitch / 8;
+	regp->CRTC[NV_CIO_CR_ULINE_INDEX] = 0x00;
+	regp->CRTC[NV_CIO_CR_VBS_INDEX] = vertBlankStart;
+	regp->CRTC[NV_CIO_CR_VBE_INDEX] = vertBlankEnd;
+	regp->CRTC[NV_CIO_CR_MODE_INDEX] = 0x43;
+	regp->CRTC[NV_CIO_CR_LCOMP_INDEX] = 0xff;
+
+	/*
+	 * Some extended CRTC registers (they are not saved with the rest of the vga regs).
+	 */
+
+	/* framebuffer can be larger than crtc scanout area. */
+	regp->CRTC[NV_CIO_CRE_RPC0_INDEX] = XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+	regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ?
+					    MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00;
+	regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) |
+					   XLATE(vertBlankStart, 10, NV_CIO_CRE_LSR_VBS_10) |
+					   XLATE(vertStart, 10, NV_CIO_CRE_LSR_VRS_10) |
+					   XLATE(vertDisplay, 10, NV_CIO_CRE_LSR_VDE_10) |
+					   XLATE(vertTotal, 10, NV_CIO_CRE_LSR_VDT_10);
+	regp->CRTC[NV_CIO_CRE_HEB__INDEX] = XLATE(horizStart, 8, NV_CIO_CRE_HEB_HRS_8) |
+					    XLATE(horizBlankStart, 8, NV_CIO_CRE_HEB_HBS_8) |
+					    XLATE(horizDisplay, 8, NV_CIO_CRE_HEB_HDE_8) |
+					    XLATE(horizTotal, 8, NV_CIO_CRE_HEB_HDT_8);
+	regp->CRTC[NV_CIO_CRE_EBR_INDEX] = XLATE(vertBlankStart, 11, NV_CIO_CRE_EBR_VBS_11) |
+					   XLATE(vertStart, 11, NV_CIO_CRE_EBR_VRS_11) |
+					   XLATE(vertDisplay, 11, NV_CIO_CRE_EBR_VDE_11) |
+					   XLATE(vertTotal, 11, NV_CIO_CRE_EBR_VDT_11);
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		horizTotal = (horizTotal >> 1) & ~1;
+		regp->CRTC[NV_CIO_CRE_ILACE__INDEX] = horizTotal;
+		regp->CRTC[NV_CIO_CRE_HEB__INDEX] |= XLATE(horizTotal, 8, NV_CIO_CRE_HEB_ILC_8);
+	} else
+		regp->CRTC[NV_CIO_CRE_ILACE__INDEX] = 0xff;  /* interlace off */
+
+	/*
+	 * Graphics Display Controller
+	 */
+	regp->Graphics[NV_VIO_GX_SR_INDEX] = 0x00;
+	regp->Graphics[NV_VIO_GX_SREN_INDEX] = 0x00;
+	regp->Graphics[NV_VIO_GX_CCOMP_INDEX] = 0x00;
+	regp->Graphics[NV_VIO_GX_ROP_INDEX] = 0x00;
+	regp->Graphics[NV_VIO_GX_READ_MAP_INDEX] = 0x00;
+	regp->Graphics[NV_VIO_GX_MODE_INDEX] = 0x40; /* 256 color mode */
+	regp->Graphics[NV_VIO_GX_MISC_INDEX] = 0x05; /* map 64k mem + graphic mode */
+	regp->Graphics[NV_VIO_GX_DONT_CARE_INDEX] = 0x0F;
+	regp->Graphics[NV_VIO_GX_BIT_MASK_INDEX] = 0xFF;
+
+	regp->Attribute[0]  = 0x00; /* standard colormap translation */
+	regp->Attribute[1]  = 0x01;
+	regp->Attribute[2]  = 0x02;
+	regp->Attribute[3]  = 0x03;
+	regp->Attribute[4]  = 0x04;
+	regp->Attribute[5]  = 0x05;
+	regp->Attribute[6]  = 0x06;
+	regp->Attribute[7]  = 0x07;
+	regp->Attribute[8]  = 0x08;
+	regp->Attribute[9]  = 0x09;
+	regp->Attribute[10] = 0x0A;
+	regp->Attribute[11] = 0x0B;
+	regp->Attribute[12] = 0x0C;
+	regp->Attribute[13] = 0x0D;
+	regp->Attribute[14] = 0x0E;
+	regp->Attribute[15] = 0x0F;
+	regp->Attribute[NV_CIO_AR_MODE_INDEX] = 0x01; /* Enable graphic mode */
+	/* Non-vga */
+	regp->Attribute[NV_CIO_AR_OSCAN_INDEX] = 0x00;
+	regp->Attribute[NV_CIO_AR_PLANE_INDEX] = 0x0F; /* enable all color planes */
+	regp->Attribute[NV_CIO_AR_HPP_INDEX] = 0x00;
+	regp->Attribute[NV_CIO_AR_CSEL_INDEX] = 0x00;
+}
+
+/**
+ * Sets up registers for the given mode/adjusted_mode pair.
+ *
+ * The clocks, CRTCs and outputs attached to this CRTC must be off.
+ *
+ * This shouldn't enable any clocks, CRTCs, or outputs, but they should
+ * be easily turned on/off after this.
+ */
+static void
+nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+	struct nv04_crtc_reg *savep = &dev_priv->saved_reg.crtc_reg[nv_crtc->index];
+	struct drm_encoder *encoder;
+	bool lvds_output = false, tmds_output = false, tv_output = false,
+		off_chip_digital = false;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+		bool digital = false;
+
+		if (encoder->crtc != crtc)
+			continue;
+
+		if (nv_encoder->dcb->type == OUTPUT_LVDS)
+			digital = lvds_output = true;
+		if (nv_encoder->dcb->type == OUTPUT_TV)
+			tv_output = true;
+		if (nv_encoder->dcb->type == OUTPUT_TMDS)
+			digital = tmds_output = true;
+		if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP && digital)
+			off_chip_digital = true;
+	}
+
+	/* Registers not directly related to the (s)vga mode */
+
+	/* What is the meaning of this register? */
+	/* A few popular values are 0x18, 0x1c, 0x38, 0x3c */
+	regp->CRTC[NV_CIO_CRE_ENH_INDEX] = savep->CRTC[NV_CIO_CRE_ENH_INDEX] & ~(1<<5);
+
+	regp->crtc_eng_ctrl = 0;
+	/* Except for rare conditions I2C is enabled on the primary crtc */
+	if (nv_crtc->index == 0)
+		regp->crtc_eng_ctrl |= NV_CRTC_FSEL_I2C;
+#if 0
+	/* Set overlay to desired crtc. */
+	if (dev->overlayAdaptor) {
+		NVPortPrivPtr pPriv = GET_OVERLAY_PRIVATE(dev);
+		if (pPriv->overlayCRTC == nv_crtc->index)
+			regp->crtc_eng_ctrl |= NV_CRTC_FSEL_OVERLAY;
+	}
+#endif
+
+	/* ADDRESS_SPACE_PNVM is the same as setting HCUR_ASI */
+	regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 |
+			     NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 |
+			     NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM;
+	if (dev_priv->chipset >= 0x11)
+		regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32;
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE;
+
+	/* Unblock some timings */
+	regp->CRTC[NV_CIO_CRE_53] = 0;
+	regp->CRTC[NV_CIO_CRE_54] = 0;
+
+	/* 0x00 is disabled, 0x11 is lvds, 0x22 crt and 0x88 tmds */
+	if (lvds_output)
+		regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x11;
+	else if (tmds_output)
+		regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x88;
+	else
+		regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x22;
+
+	/* These values seem to vary */
+	/* This register seems to be used by the bios to make certain decisions on some G70 cards? */
+	regp->CRTC[NV_CIO_CRE_SCRATCH4__INDEX] = savep->CRTC[NV_CIO_CRE_SCRATCH4__INDEX];
+
+	nv_crtc_set_digital_vibrance(crtc, nv_crtc->saturation);
+
+	/* probably a scratch reg, but kept for cargo-cult purposes:
+	 * bit0: crtc0?, head A
+	 * bit6: lvds, head A
+	 * bit7: (only in X), head A
+	 */
+	if (nv_crtc->index == 0)
+		regp->CRTC[NV_CIO_CRE_4B] = savep->CRTC[NV_CIO_CRE_4B] | 0x80;
+
+	/* The blob seems to take the current value from crtc 0, add 4 to that
+	 * and reuse the old value for crtc 1 */
+	regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] = dev_priv->saved_reg.crtc_reg[0].CRTC[NV_CIO_CRE_TVOUT_LATENCY];
+	if (!nv_crtc->index)
+		regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] += 4;
+
+	/* the blob sometimes sets |= 0x10 (which is the same as setting |=
+	 * 1 << 30 on 0x60.830), for no apparent reason */
+	regp->CRTC[NV_CIO_CRE_59] = off_chip_digital;
+
+	regp->crtc_830 = mode->crtc_vdisplay - 3;
+	regp->crtc_834 = mode->crtc_vdisplay - 1;
+
+	if (dev_priv->card_type == NV_40)
+		/* This is what the blob does */
+		regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850);
+
+	if (dev_priv->card_type >= NV_30)
+		regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
+
+	regp->crtc_cfg = NV_PCRTC_CONFIG_START_ADDRESS_HSYNC;
+
+	/* Some misc regs */
+	if (dev_priv->card_type == NV_40) {
+		regp->CRTC[NV_CIO_CRE_85] = 0xFF;
+		regp->CRTC[NV_CIO_CRE_86] = 0x1;
+	}
+
+	regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] = (crtc->fb->depth + 1) / 8;
+	/* Enable slaved mode (called MODE_TV in nv4ref.h) */
+	if (lvds_output || tmds_output || tv_output)
+		regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (1 << 7);
+
+	/* Generic PRAMDAC regs */
+
+	if (dev_priv->card_type >= NV_10)
+		/* Only bit that bios and blob set. */
+		regp->nv10_cursync = (1 << 25);
+
+	regp->ramdac_gen_ctrl = NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS |
+				NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL |
+				NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON;
+	if (crtc->fb->depth == 16)
+		regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
+	if (dev_priv->chipset >= 0x11)
+		regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG;
+
+	regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */
+	regp->tv_setup = 0;
+
+	nv_crtc_set_image_sharpening(crtc, nv_crtc->sharpness);
+
+	/* Some values the blob sets */
+	regp->ramdac_8c0 = 0x100;
+	regp->ramdac_a20 = 0x0;
+	regp->ramdac_a24 = 0xfffff;
+	regp->ramdac_a34 = 0x1;
+}
+
+/**
+ * Sets up registers for the given mode/adjusted_mode pair.
+ *
+ * The clocks, CRTCs and outputs attached to this CRTC must be off.
+ *
+ * This shouldn't enable any clocks, CRTCs, or outputs, but they should
+ * be easily turned on/off after this.
+ */
+static int
+nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+		 struct drm_display_mode *adjusted_mode,
+		 int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	NV_DEBUG(dev, "CRTC mode on CRTC %d:\n", nv_crtc->index);
+	drm_mode_debug_printmodeline(adjusted_mode);
+
+	/* unlock must come after turning off FP_TG_CONTROL in output_prepare */
+	nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1);
+
+	nv_crtc_mode_set_vga(crtc, adjusted_mode);
+	/* calculated in nv04_dfp_prepare; nv40 needs it written before calculating PLLs */
+	if (dev_priv->card_type == NV_40)
+		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, dev_priv->mode_reg.sel_clk);
+	nv_crtc_mode_set_regs(crtc, adjusted_mode);
+	nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock);
+	return 0;
+}
+
+static void nv_crtc_save(struct drm_crtc *crtc)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+	struct nv04_mode_state *state = &dev_priv->mode_reg;
+	struct nv04_crtc_reg *crtc_state = &state->crtc_reg[nv_crtc->index];
+	struct nv04_mode_state *saved = &dev_priv->saved_reg;
+	struct nv04_crtc_reg *crtc_saved = &saved->crtc_reg[nv_crtc->index];
+
+	if (nv_two_heads(crtc->dev))
+		NVSetOwner(crtc->dev, nv_crtc->index);
+
+	nouveau_hw_save_state(crtc->dev, nv_crtc->index, saved);
+
+	/* init some state to saved value */
+	state->sel_clk = saved->sel_clk & ~(0x5 << 16);
+	crtc_state->CRTC[NV_CIO_CRE_LCD__INDEX] = crtc_saved->CRTC[NV_CIO_CRE_LCD__INDEX];
+	state->pllsel = saved->pllsel & ~(PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK);
+	crtc_state->gpio_ext = crtc_saved->gpio_ext;
+}
+
+static void nv_crtc_restore(struct drm_crtc *crtc)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+	int head = nv_crtc->index;
+	uint8_t saved_cr21 = dev_priv->saved_reg.crtc_reg[head].CRTC[NV_CIO_CRE_21];
+
+	if (nv_two_heads(crtc->dev))
+		NVSetOwner(crtc->dev, head);
+
+	nouveau_hw_load_state(crtc->dev, head, &dev_priv->saved_reg);
+	nv_lock_vga_crtc_shadow(crtc->dev, head, saved_cr21);
+
+	nv_crtc->last_dpms = NV_DPMS_CLEARED;
+}
+
+static void nv_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
+
+	if (nv_two_heads(dev))
+		NVSetOwner(dev, nv_crtc->index);
+
+	funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+	NVBlankScreen(dev, nv_crtc->index, true);
+
+	/* Some more preparation. */
+	NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA);
+	if (dev_priv->card_type == NV_40) {
+		uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900);
+		NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000);
+	}
+}
+
+static void nv_crtc_commit(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
+	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+	nouveau_hw_load_state(dev, nv_crtc->index, &dev_priv->mode_reg);
+	nv04_crtc_mode_set_base(crtc, crtc->x, crtc->y, NULL);
+
+#ifdef __BIG_ENDIAN
+	/* turn on LFB swapping */
+	{
+		uint8_t tmp = NVReadVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RCR);
+		tmp |= MASK(NV_CIO_CRE_RCR_ENDIAN_BIG);
+		NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RCR, tmp);
+	}
+#endif
+
+	funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static void nv_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+	NV_DEBUG(crtc->dev, "\n");
+
+	if (!nv_crtc)
+		return;
+
+	drm_crtc_cleanup(crtc);
+
+	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+	kfree(nv_crtc);
+}
+
+static void
+nv_crtc_gamma_load(struct drm_crtc *crtc)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct drm_device *dev = nv_crtc->base.dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct rgb { uint8_t r, g, b; } __attribute__((packed)) *rgbs;
+	int i;
+
+	rgbs = (struct rgb *)dev_priv->mode_reg.crtc_reg[nv_crtc->index].DAC;
+	for (i = 0; i < 256; i++) {
+		rgbs[i].r = nv_crtc->lut.r[i] >> 8;
+		rgbs[i].g = nv_crtc->lut.g[i] >> 8;
+		rgbs[i].b = nv_crtc->lut.b[i] >> 8;
+	}
+
+	nouveau_hw_load_state_palette(dev, nv_crtc->index, &dev_priv->mode_reg);
+}
+
+static void
+nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t size)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	int i;
+
+	if (size != 256)
+		return;
+
+	for (i = 0; i < 256; i++) {
+		nv_crtc->lut.r[i] = r[i];
+		nv_crtc->lut.g[i] = g[i];
+		nv_crtc->lut.b[i] = b[i];
+	}
+
+	/* We need to know the depth before we upload, but it's possible to
+	 * get called before a framebuffer is bound.  If this is the case,
+	 * mark the lut values as dirty by setting depth==0, and it'll be
+	 * uploaded on the first mode_set_base()
+	 */
+	if (!nv_crtc->base.fb) {
+		nv_crtc->lut.depth = 0;
+		return;
+	}
+
+	nv_crtc_gamma_load(crtc);
+}
+
+static int
+nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+			struct drm_framebuffer *old_fb)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+	struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
+	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+	int arb_burst, arb_lwm;
+	int ret;
+
+	ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
+	if (ret)
+		return ret;
+
+	if (old_fb) {
+		struct nouveau_framebuffer *ofb = nouveau_framebuffer(old_fb);
+		nouveau_bo_unpin(ofb->nvbo);
+	}
+
+	nv_crtc->fb.offset = fb->nvbo->bo.offset;
+
+	if (nv_crtc->lut.depth != drm_fb->depth) {
+		nv_crtc->lut.depth = drm_fb->depth;
+		nv_crtc_gamma_load(crtc);
+	}
+
+	/* Update the framebuffer format. */
+	regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] &= ~3;
+	regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (crtc->fb->depth + 1) / 8;
+	regp->ramdac_gen_ctrl &= ~NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
+	if (crtc->fb->depth == 16)
+		regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
+	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_PIXEL_INDEX);
+	NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_GENERAL_CONTROL,
+		      regp->ramdac_gen_ctrl);
+
+	regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitch >> 3;
+	regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
+		XLATE(drm_fb->pitch >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX);
+	crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX);
+
+	/* Update the framebuffer location. */
+	regp->fb_start = nv_crtc->fb.offset & ~3;
+	regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8);
+	NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_START, regp->fb_start);
+
+	/* Update the arbitration parameters. */
+	nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->bits_per_pixel,
+			 &arb_burst, &arb_lwm);
+
+	regp->CRTC[NV_CIO_CRE_FF_INDEX] = arb_burst;
+	regp->CRTC[NV_CIO_CRE_FFLWM__INDEX] = arb_lwm & 0xff;
+	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX);
+	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX);
+
+	if (dev_priv->card_type >= NV_30) {
+		regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8;
+		crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47);
+	}
+
+	return 0;
+}
+
+static void nv04_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
+			       struct nouveau_bo *dst)
+{
+	int width = nv_cursor_width(dev);
+	uint32_t pixel;
+	int i, j;
+
+	for (i = 0; i < width; i++) {
+		for (j = 0; j < width; j++) {
+			pixel = nouveau_bo_rd32(src, i*64 + j);
+
+			nouveau_bo_wr16(dst, i*width + j, (pixel & 0x80000000) >> 16
+				     | (pixel & 0xf80000) >> 9
+				     | (pixel & 0xf800) >> 6
+				     | (pixel & 0xf8) >> 3);
+		}
+	}
+}
+
+static void nv11_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
+			       struct nouveau_bo *dst)
+{
+	uint32_t pixel;
+	int alpha, i;
+
+	/* nv11+ supports premultiplied (PM) or non-premultiplied (NPM) alpha
+	 * cursors (though NPM in combination with fp dithering may not work on
+	 * nv11, from "nv" driver history).
+	 * NPM mode needs NV_PCRTC_CURSOR_CONFIG_ALPHA_BLEND set and is what the
+	 * blob uses; however, we are given PM cursors, so we use PM mode.
+	 */
+	for (i = 0; i < 64 * 64; i++) {
+		pixel = nouveau_bo_rd32(src, i);
+
+		/* hw gets unhappy if alpha <= rgb values.  for a PM image "less
+		 * than" shouldn't happen; fix "equal to" case by adding one to
+		 * alpha channel (slightly inaccurate, but so is attempting to
+		 * get back to NPM images, due to limits of integer precision)
+		 */
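+		/* e.g. a hypothetical mid-grey PM pixel 0x80808080 becomes
+		 * 0x81808080 after the adjustment below */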
+		alpha = pixel >> 24;
+		if (alpha > 0 && alpha < 255)
+			pixel = (pixel & 0x00ffffff) | ((alpha + 1) << 24);
+
+#ifdef __BIG_ENDIAN
+		{
+			struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+			if (dev_priv->chipset == 0x11) {
+				pixel = ((pixel & 0x000000ff) << 24) |
+					((pixel & 0x0000ff00) << 8) |
+					((pixel & 0x00ff0000) >> 8) |
+					((pixel & 0xff000000) >> 24);
+			}
+		}
+#endif
+
+		nouveau_bo_wr32(dst, i, pixel);
+	}
+}
+
+static int
+nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+		     uint32_t buffer_handle, uint32_t width, uint32_t height)
+{
+	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+	struct drm_device *dev = dev_priv->dev;
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nouveau_bo *cursor = NULL;
+	struct drm_gem_object *gem;
+	int ret = 0;
+
+	if (width != 64 || height != 64)
+		return -EINVAL;
+
+	if (!buffer_handle) {
+		nv_crtc->cursor.hide(nv_crtc, true);
+		return 0;
+	}
+
+	gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
+	if (!gem)
+		return -EINVAL;
+	cursor = nouveau_gem_object(gem);
+
+	ret = nouveau_bo_map(cursor);
+	if (ret)
+		goto out;
+
+	if (dev_priv->chipset >= 0x11)
+		nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
+	else
+		nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
+
+	nouveau_bo_unmap(cursor);
+	nv_crtc->cursor.offset = nv_crtc->cursor.nvbo->bo.offset;
+	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
+	nv_crtc->cursor.show(nv_crtc, true);
+out:
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_unreference(gem);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+static int
+nv04_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+	nv_crtc->cursor.set_pos(nv_crtc, x, y);
+	return 0;
+}
+
+static const struct drm_crtc_funcs nv04_crtc_funcs = {
+	.save = nv_crtc_save,
+	.restore = nv_crtc_restore,
+	.cursor_set = nv04_crtc_cursor_set,
+	.cursor_move = nv04_crtc_cursor_move,
+	.gamma_set = nv_crtc_gamma_set,
+	.set_config = drm_crtc_helper_set_config,
+	.destroy = nv_crtc_destroy,
+};
+
+static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
+	.dpms = nv_crtc_dpms,
+	.prepare = nv_crtc_prepare,
+	.commit = nv_crtc_commit,
+	.mode_fixup = nv_crtc_mode_fixup,
+	.mode_set = nv_crtc_mode_set,
+	.mode_set_base = nv04_crtc_mode_set_base,
+	.load_lut = nv_crtc_gamma_load,
+};
+
+int
+nv04_crtc_create(struct drm_device *dev, int crtc_num)
+{
+	struct nouveau_crtc *nv_crtc;
+	int ret, i;
+
+	nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
+	if (!nv_crtc)
+		return -ENOMEM;
+
+	for (i = 0; i < 256; i++) {
+		nv_crtc->lut.r[i] = i << 8;
+		nv_crtc->lut.g[i] = i << 8;
+		nv_crtc->lut.b[i] = i << 8;
+	}
+	nv_crtc->lut.depth = 0;
+
+	nv_crtc->index = crtc_num;
+	nv_crtc->last_dpms = NV_DPMS_CLEARED;
+
+	drm_crtc_init(dev, &nv_crtc->base, &nv04_crtc_funcs);
+	drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
+	drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
+
+	ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
+			     0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
+	if (!ret) {
+		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
+		if (!ret)
+			ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
+		if (ret)
+			nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+	}
+
+	nv04_cursor_init(nv_crtc);
+
+	return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv04_cursor.c b/drivers/gpu/drm/nouveau/nv04_cursor.c
new file mode 100644
index 0000000..89a91b9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_cursor.c
@@ -0,0 +1,70 @@
+#include "drmP.h"
+#include "drm_mode.h"
+#include "nouveau_reg.h"
+#include "nouveau_drv.h"
+#include "nouveau_crtc.h"
+#include "nouveau_hw.h"
+
+static void
+nv04_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
+{
+	nv_show_cursor(nv_crtc->base.dev, nv_crtc->index, true);
+}
+
+static void
+nv04_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
+{
+	nv_show_cursor(nv_crtc->base.dev, nv_crtc->index, false);
+}
+
+static void
+nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
+{
+	NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index,
+		      NV_PRAMDAC_CU_START_POS,
+		      XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) |
+		      XLATE(x, 0, NV_PRAMDAC_CU_START_POS_X));
+}
+
+static void
+crtc_wr_cio_state(struct drm_crtc *crtc, struct nv04_crtc_reg *crtcstate, int index)
+{
+	NVWriteVgaCrtc(crtc->dev, nouveau_crtc(crtc)->index, index,
+		       crtcstate->CRTC[index]);
+}
+
+static void
+nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
+{
+	struct drm_device *dev = nv_crtc->base.dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+	struct drm_crtc *crtc = &nv_crtc->base;
+
+	regp->CRTC[NV_CIO_CRE_HCUR_ADDR0_INDEX] =
+		MASK(NV_CIO_CRE_HCUR_ASI) |
+		XLATE(offset, 17, NV_CIO_CRE_HCUR_ADDR0_ADR);
+	regp->CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX] =
+		XLATE(offset, 11, NV_CIO_CRE_HCUR_ADDR1_ADR);
+	if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
+		regp->CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX] |=
+			MASK(NV_CIO_CRE_HCUR_ADDR1_CUR_DBL);
+	regp->CRTC[NV_CIO_CRE_HCUR_ADDR2_INDEX] = offset >> 24;
+
+	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
+	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
+	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
+	if (dev_priv->card_type == NV_40)
+		nv_fix_nv40_hw_cursor(dev, nv_crtc->index);
+}
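+
+/* Note: the cursor offset is scattered across the three HCUR_ADDR extended
+ * CRTC registers written above (plus an NV40-specific fixup), so set_offset
+ * rewrites all of them on every change. */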
+
+int
+nv04_cursor_init(struct nouveau_crtc *crtc)
+{
+	crtc->cursor.set_offset = nv04_cursor_set_offset;
+	crtc->cursor.set_pos = nv04_cursor_set_pos;
+	crtc->cursor.hide = nv04_cursor_hide;
+	crtc->cursor.show = nv04_cursor_show;
+	return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
new file mode 100644
index 0000000..a5fa517
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -0,0 +1,528 @@
+/*
+ * Copyright 2003 NVIDIA, Corporation
+ * Copyright 2006 Dave Airlie
+ * Copyright 2007 Maarten Maathuis
+ * Copyright 2007-2009 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_crtc.h"
+#include "nouveau_hw.h"
+#include "nvreg.h"
+
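+/*
+ * Each DAC's RAMDAC registers sit at a fixed offset from output A's:
+ * judging from the tests below, output C adds 0x68 and output B adds
+ * 0x2000 (inferred from this code rather than from documentation).
+ */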
+int nv04_dac_output_offset(struct drm_encoder *encoder)
+{
+	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+	int offset = 0;
+
+	if (dcb->or & (8 | OUTPUT_C))
+		offset += 0x68;
+	if (dcb->or & (8 | OUTPUT_B))
+		offset += 0x2000;
+
+	return offset;
+}
+
+/*
+ * arbitrary limit to number of sense oscillations tolerated in one sample
+ * period (observed to be at least 13 in "nvidia")
+ */
+#define MAX_HBLANK_OSC 20
+
+/*
+ * arbitrary limit to number of conflicting sample pairs to tolerate at a
+ * voltage step (observed to be at least 5 in "nvidia")
+ */
+#define MAX_SAMPLE_PAIRS 10
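+/*
+ * Sketch of the load-detect scheme used below: the palette's blue channel
+ * is ramped from 8 towards 0x18 while the RAMDAC sense bit is sampled
+ * around hblank.  A connected (terminated) monitor holds sense HI for the
+ * whole ramp; an unloaded output drops LO early.
+ */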
+
+static int sample_load_twice(struct drm_device *dev, bool sense[2])
+{
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		bool sense_a, sense_b, sense_b_prime;
+		int j = 0;
+
+		/*
+		 * wait for bit 0 clear -- out of hblank -- (say reg value 0x4),
+		 * then wait for transition 0x4->0x5->0x4: enter hblank, leave
+		 * hblank again
+		 * use a 10ms timeout (guards against crtc being inactive, in
+		 * which case blank state would never change)
+		 */
+		if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+					0x00000001, 0x00000000))
+			return -EBUSY;
+		if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+					0x00000001, 0x00000001))
+			return -EBUSY;
+		if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+					0x00000001, 0x00000000))
+			return -EBUSY;
+
+		udelay(100);
+		/* when level triggers, sense is _LO_ */
+		sense_a = nv_rd08(dev, NV_PRMCIO_INP0) & 0x10;
+
+		/* take another reading until it agrees with sense_a... */
+		do {
+			udelay(100);
+			sense_b = nv_rd08(dev, NV_PRMCIO_INP0) & 0x10;
+			if (sense_a != sense_b) {
+				sense_b_prime =
+					nv_rd08(dev, NV_PRMCIO_INP0) & 0x10;
+				if (sense_b == sense_b_prime) {
+					/* ... unless two consecutive subsequent
+					 * samples agree; sense_a is replaced */
+					sense_a = sense_b;
+					/* force mis-match so we loop */
+					sense_b = !sense_a;
+				}
+			}
+		} while ((sense_a != sense_b) && ++j < MAX_HBLANK_OSC);
+
+		if (j == MAX_HBLANK_OSC)
+			/* with so much oscillation, default to sense:LO */
+			sense[i] = false;
+		else
+			sense[i] = sense_a;
+	}
+
+	return 0;
+}
+
+static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
+						 struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	uint8_t saved_seq1, saved_pi, saved_rpc1;
+	uint8_t saved_palette0[3], saved_palette_mask;
+	uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
+	int i;
+	uint8_t blue;
+	bool sense = true;
+
+	/*
+	 * for this detection to work, there needs to be a mode set up on the
+	 * CRTC.  this is presumed to be the case
+	 */
+
+	if (nv_two_heads(dev))
+		/* only implemented for head A for now */
+		NVSetOwner(dev, 0);
+
+	saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX);
+	NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20);
+
+	saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL);
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL,
+		      saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF);
+
+	msleep(10);
+
+	saved_pi = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX,
+		       saved_pi & ~(0x80 | MASK(NV_CIO_CRE_PIXEL_FORMAT)));
+	saved_rpc1 = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1 & ~0xc0);
+
+	nv_wr08(dev, NV_PRMDIO_READ_MODE_ADDRESS, 0x0);
+	for (i = 0; i < 3; i++)
+		saved_palette0[i] = nv_rd08(dev, NV_PRMDIO_PALETTE_DATA);
+	saved_palette_mask = nv_rd08(dev, NV_PRMDIO_PIXEL_MASK);
+	nv_wr08(dev, NV_PRMDIO_PIXEL_MASK, 0);
+
+	saved_rgen_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL);
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL,
+		      (saved_rgen_ctrl & ~(NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS |
+					   NV_PRAMDAC_GENERAL_CONTROL_TERMINATION_75OHM)) |
+		      NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON);
+
+	blue = 8;	/* start of test range */
+
+	do {
+		bool sense_pair[2];
+
+		nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
+		nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, 0);
+		nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, 0);
+		/* testing blue won't find monochrome monitors.  I don't care */
+		nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, blue);
+
+		i = 0;
+		/* take sample pairs until both samples in the pair agree */
+		do {
+			if (sample_load_twice(dev, sense_pair))
+				goto out;
+		} while ((sense_pair[0] != sense_pair[1]) &&
+							++i < MAX_SAMPLE_PAIRS);
+
+		if (i == MAX_SAMPLE_PAIRS)
+			/* too much oscillation defaults to LO */
+			sense = false;
+		else
+			sense = sense_pair[0];
+
+	/*
+	 * if sense goes LO before blue ramps to 0x18, monitor is not connected.
+	 * ergo, if blue gets to 0x18, monitor must be connected
+	 */
+	} while (++blue < 0x18 && sense);
+
+out:
+	nv_wr08(dev, NV_PRMDIO_PIXEL_MASK, saved_palette_mask);
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, saved_rgen_ctrl);
+	nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
+	for (i = 0; i < 3; i++)
+		nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]);
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL, saved_rtest_ctrl);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
+	NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1);
+
+	if (blue == 0x18) {
+		NV_TRACE(dev, "Load detected on head A\n");
+		return connector_status_connected;
+	}
+
+	return connector_status_disconnected;
+}
+
+enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
+					  struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+	uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
+	uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
+		saved_rtest_ctrl, saved_gpio0, saved_gpio1, temp, routput;
+	int head, present = 0;
+
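+	/* three 10-bit channel levels, packed low-to-high as R, G, B */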
+#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
+	if (dcb->type == OUTPUT_TV) {
+		testval = RGB_TEST_DATA(0xa0, 0xa0, 0xa0);
+
+		if (dev_priv->vbios->tvdactestval)
+			testval = dev_priv->vbios->tvdactestval;
+	} else {
+		testval = RGB_TEST_DATA(0x140, 0x140, 0x140); /* 0x94050140 */
+
+		if (dev_priv->vbios->dactestval)
+			testval = dev_priv->vbios->dactestval;
+	}
+
+	saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset,
+		      saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF);
+
+	saved_powerctrl_2 = nvReadMC(dev, NV_PBUS_POWERCTRL_2);
+
+	nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff);
+	if (regoffset == 0x68) {
+		saved_powerctrl_4 = nvReadMC(dev, NV_PBUS_POWERCTRL_4);
+		nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
+	}
+
+	saved_gpio1 = nv17_gpio_get(dev, DCB_GPIO_TVDAC1);
+	saved_gpio0 = nv17_gpio_get(dev, DCB_GPIO_TVDAC0);
+
+	nv17_gpio_set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
+	nv17_gpio_set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);
+
+	msleep(4);
+
+	saved_routput = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
+	head = (saved_routput & 0x100) >> 8;
+#if 0
+	/* if there's a spare crtc, using it will minimise flicker for the case
+	 * where the in-use crtc is in use by an off-chip tmds encoder */
+	if (xf86_config->crtc[head]->enabled && !xf86_config->crtc[head ^ 1]->enabled)
+		head ^= 1;
+#endif
+	/* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */
+	routput = (saved_routput & 0xfffffece) | head << 8;
+
+	if (dev_priv->card_type >= NV_40) {
+		if (dcb->type == OUTPUT_TV)
+			routput |= 0x1a << 16;
+		else
+			routput &= ~(0x1a << 16);
+	}
+
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, routput);
+	msleep(1);
+
+	temp = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, temp | 1);
+
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA,
+		      NV_PRAMDAC_TESTPOINT_DATA_NOTBLANK | testval);
+	temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
+		      temp | NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED);
+	msleep(5);
+
+	temp = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
+
+	if (dcb->type == OUTPUT_TV)
+		present = (nv17_tv_detect(encoder, connector, temp)
+			   == connector_status_connected);
+	else
+		present = temp & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI;
+
+	temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
+		      temp & ~NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA, 0);
+
+	/* bios does something more complex for restoring, but I think this is good enough */
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, saved_routput);
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, saved_rtest_ctrl);
+	if (regoffset == 0x68)
+		nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
+	nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
+
+	nv17_gpio_set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
+	nv17_gpio_set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
+
+	if (present) {
+		NV_INFO(dev, "Load detected on output %c\n", '@' + ffs(dcb->or));
+		return connector_status_connected;
+	}
+
+	return connector_status_disconnected;
+}
+
+
+static bool nv04_dac_mode_fixup(struct drm_encoder *encoder,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void nv04_dac_prepare(struct drm_encoder *encoder)
+{
+	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+	struct drm_device *dev = encoder->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int head = nouveau_crtc(encoder->crtc)->index;
+	struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;
+
+	helper->dpms(encoder, DRM_MODE_DPMS_OFF);
+
+	nv04_dfp_disable(dev, head);
+
+	/* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
+	 * at LCD__INDEX which we don't alter
+	 */
+	if (!(crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] & 0x44))
+		crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] = 0;
+}
+
+
+static void nv04_dac_mode_set(struct drm_encoder *encoder,
+			      struct drm_display_mode *mode,
+			      struct drm_display_mode *adjusted_mode)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int head = nouveau_crtc(encoder->crtc)->index;
+
+	NV_TRACE(dev, "%s called for encoder %d\n", __func__,
+		      nv_encoder->dcb->index);
+
+	if (nv_gf4_disp_arch(dev)) {
+		struct drm_encoder *rebind;
+		uint32_t dac_offset = nv04_dac_output_offset(encoder);
+		uint32_t otherdac;
+
+		/* bits 16-19 are set on some G70 cards,
+		 * but don't seem to have much effect */
+		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset,
+			      head << 8 | NV_PRAMDAC_DACCLK_SEL_DACCLK);
+		/* force any other vga encoders to bind to the other crtc */
+		list_for_each_entry(rebind, &dev->mode_config.encoder_list, head) {
+			if (rebind == encoder
+			    || nouveau_encoder(rebind)->dcb->type != OUTPUT_ANALOG)
+				continue;
+
+			dac_offset = nv04_dac_output_offset(rebind);
+			otherdac = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset);
+			NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset,
+				      (otherdac & ~0x0100) | (head ^ 1) << 8);
+		}
+	}
+
+	/* This could use refinement for flatpanels, but it should work this way */
+	if (dev_priv->chipset < 0x44)
+		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
+	else
+		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
+}
+
+static void nv04_dac_commit(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+
+	helper->dpms(encoder, DRM_MODE_DPMS_ON);
+
+	NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
+		drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+		nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+}
+
+void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+
+	if (nv_gf4_disp_arch(dev)) {
+		uint32_t *dac_users = &dev_priv->dac_users[ffs(dcb->or) - 1];
+		int dacclk_off = NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder);
+		uint32_t dacclk = NVReadRAMDAC(dev, 0, dacclk_off);
+
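+		/* dac_users is a per-DAC bitmask of the encoders currently
+		 * driving the output; the DAC clock is only gated off once
+		 * the last user is gone */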
+		if (enable) {
+			*dac_users |= 1 << dcb->index;
+			NVWriteRAMDAC(dev, 0, dacclk_off, dacclk | NV_PRAMDAC_DACCLK_SEL_DACCLK);
+
+		} else {
+			*dac_users &= ~(1 << dcb->index);
+			if (!*dac_users)
+				NVWriteRAMDAC(dev, 0, dacclk_off,
+					dacclk & ~NV_PRAMDAC_DACCLK_SEL_DACCLK);
+		}
+	}
+}
+
+static void nv04_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+	if (nv_encoder->last_dpms == mode)
+		return;
+	nv_encoder->last_dpms = mode;
+
+	NV_INFO(dev, "Setting dpms mode %d on vga encoder (output %d)\n",
+		     mode, nv_encoder->dcb->index);
+
+	nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
+}
+
+static void nv04_dac_save(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+
+	if (nv_gf4_disp_arch(dev))
+		nv_encoder->restore.output = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK +
+							  nv04_dac_output_offset(encoder));
+}
+
+static void nv04_dac_restore(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+
+	if (nv_gf4_disp_arch(dev))
+		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder),
+			      nv_encoder->restore.output);
+
+	nv_encoder->last_dpms = NV_DPMS_CLEARED;
+}
+
+static void nv04_dac_destroy(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+	NV_DEBUG(encoder->dev, "\n");
+
+	drm_encoder_cleanup(encoder);
+	kfree(nv_encoder);
+}
+
+static const struct drm_encoder_helper_funcs nv04_dac_helper_funcs = {
+	.dpms = nv04_dac_dpms,
+	.save = nv04_dac_save,
+	.restore = nv04_dac_restore,
+	.mode_fixup = nv04_dac_mode_fixup,
+	.prepare = nv04_dac_prepare,
+	.commit = nv04_dac_commit,
+	.mode_set = nv04_dac_mode_set,
+	.detect = nv04_dac_detect
+};
+
+static const struct drm_encoder_helper_funcs nv17_dac_helper_funcs = {
+	.dpms = nv04_dac_dpms,
+	.save = nv04_dac_save,
+	.restore = nv04_dac_restore,
+	.mode_fixup = nv04_dac_mode_fixup,
+	.prepare = nv04_dac_prepare,
+	.commit = nv04_dac_commit,
+	.mode_set = nv04_dac_mode_set,
+	.detect = nv17_dac_detect
+};
+
+static const struct drm_encoder_funcs nv04_dac_funcs = {
+	.destroy = nv04_dac_destroy,
+};
+
+int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry)
+{
+	const struct drm_encoder_helper_funcs *helper;
+	struct drm_encoder *encoder;
+	struct nouveau_encoder *nv_encoder = NULL;
+
+	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+	if (!nv_encoder)
+		return -ENOMEM;
+
+	encoder = to_drm_encoder(nv_encoder);
+
+	nv_encoder->dcb = entry;
+	nv_encoder->or = ffs(entry->or) - 1;
+
+	if (nv_gf4_disp_arch(dev))
+		helper = &nv17_dac_helper_funcs;
+	else
+		helper = &nv04_dac_helper_funcs;
+
+	drm_encoder_init(dev, encoder, &nv04_dac_funcs, DRM_MODE_ENCODER_DAC);
+	drm_encoder_helper_add(encoder, helper);
+
+	encoder->possible_crtcs = entry->heads;
+	encoder->possible_clones = 0;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
new file mode 100644
index 0000000..e5b3333
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -0,0 +1,621 @@
+/*
+ * Copyright 2003 NVIDIA, Corporation
+ * Copyright 2006 Dave Airlie
+ * Copyright 2007 Maarten Maathuis
+ * Copyright 2007-2009 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_crtc.h"
+#include "nouveau_hw.h"
+#include "nvreg.h"
+
+#define FP_TG_CONTROL_ON  (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |	\
+			   NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS |		\
+			   NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS)
+#define FP_TG_CONTROL_OFF (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_DISABLE |	\
+			   NV_PRAMDAC_FP_TG_CONTROL_HSYNC_DISABLE |	\
+			   NV_PRAMDAC_FP_TG_CONTROL_VSYNC_DISABLE)
+
+static inline bool is_fpc_off(uint32_t fpc)
+{
+	return ((fpc & (FP_TG_CONTROL_ON | FP_TG_CONTROL_OFF)) ==
+			FP_TG_CONTROL_OFF);
+}
+
+int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent)
+{
+	/* special case of nv_read_tmds to find crtc associated with an output.
+	 * this does not give a correct answer for off-chip dvi, but there's no
+	 * use for such an answer anyway
+	 */
+	int ramdac = (dcbent->or & OUTPUT_C) >> 2;
+
+	NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL,
+	NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | 0x4);
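+	/* bit 3 of the readback appears to flag binding to the other head;
+	 * XORing with the ramdac index then recovers the bound head
+	 * (inferred from usage, not from documentation) */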
+	return ((NVReadRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA) & 0x8) >> 3) ^ ramdac;
+}
+
+void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent,
+			int head, bool dl)
+{
+	/* The BIOS scripts don't do this for us, sadly.
+	 * Luckily we do know the values ;-)
+	 *
+	 * head < 0 indicates we wish to force a setting with the overrideval
+	 * (for VT restore etc.)
+	 */
+
+	int ramdac = (dcbent->or & OUTPUT_C) >> 2;
+	uint8_t tmds04 = 0x80;
+
+	if (head != ramdac)
+		tmds04 = 0x88;
+
+	if (dcbent->type == OUTPUT_LVDS)
+		tmds04 |= 0x01;
+
+	nv_write_tmds(dev, dcbent->or, 0, 0x04, tmds04);
+
+	if (dl)	/* dual link */
+		nv_write_tmds(dev, dcbent->or, 1, 0x04, tmds04 ^ 0x08);
+}
+
+void nv04_dfp_disable(struct drm_device *dev, int head)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;
+
+	if (NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL) &
+	    FP_TG_CONTROL_ON) {
+		/* digital remnants must be cleaned up before new crtc
+		 * values are programmed.  the delay gives the vga stuff
+		 * time to realise it's in control again
+		 */
+		NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL,
+			      FP_TG_CONTROL_OFF);
+		msleep(50);
+	}
+	/* don't inadvertently turn it on when state written later */
+	crtcstate[head].fp_control = FP_TG_CONTROL_OFF;
+}
+
+void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	struct nouveau_crtc *nv_crtc;
+	uint32_t *fpc;
+
+	if (mode == DRM_MODE_DPMS_ON) {
+		nv_crtc = nouveau_crtc(encoder->crtc);
+		fpc = &dev_priv->mode_reg.crtc_reg[nv_crtc->index].fp_control;
+
+		if (is_fpc_off(*fpc)) {
+			/* using the saved value is ok, as (is_digital && dpms_on &&
+			 * fp_control==OFF) is (at present) *only* true when
+			 * fpc's most recent change was made by the "off" code below
+			 */
+			*fpc = nv_crtc->dpms_saved_fp_control;
+		}
+
+		nv_crtc->fp_users |= 1 << nouveau_encoder(encoder)->dcb->index;
+		NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_FP_TG_CONTROL, *fpc);
+	} else {
+		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+			nv_crtc = nouveau_crtc(crtc);
+			fpc = &dev_priv->mode_reg.crtc_reg[nv_crtc->index].fp_control;
+
+			nv_crtc->fp_users &= ~(1 << nouveau_encoder(encoder)->dcb->index);
+			if (!is_fpc_off(*fpc) && !nv_crtc->fp_users) {
+				nv_crtc->dpms_saved_fp_control = *fpc;
+				/* cut the FP output */
+				*fpc &= ~FP_TG_CONTROL_ON;
+				*fpc |= FP_TG_CONTROL_OFF;
+				NVWriteRAMDAC(dev, nv_crtc->index,
+					      NV_PRAMDAC_FP_TG_CONTROL, *fpc);
+			}
+		}
+	}
+}
+
+static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
+
+	/* For internal panels and gpu scaling on DVI we need the native mode */
+	if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
+		if (!nv_connector->native_mode)
+			return false;
+		nv_encoder->mode = *nv_connector->native_mode;
+		adjusted_mode->clock = nv_connector->native_mode->clock;
+	} else {
+		nv_encoder->mode = *adjusted_mode;
+	}
+
+	return true;
+}
+
+static void nv04_dfp_prepare_sel_clk(struct drm_device *dev,
+				     struct nouveau_encoder *nv_encoder, int head)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv04_mode_state *state = &dev_priv->mode_reg;
+	uint32_t bits1618 = nv_encoder->dcb->or & OUTPUT_A ? 0x10000 : 0x40000;
+
+	if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP)
+		return;
+
+	/* SEL_CLK is only used on the primary ramdac.
+	 * It toggles spread spectrum PLL output and sets the bindings of PLLs
+	 * to heads on digital outputs
+	 */
+	if (head)
+		state->sel_clk |= bits1618;
+	else
+		state->sel_clk &= ~bits1618;
+
+	/* nv30:
+	 *	bit 0		NVClk spread spectrum on/off
+	 *	bit 2		MemClk spread spectrum on/off
+	 * 	bit 4		PixClk1 spread spectrum on/off toggle
+	 * 	bit 6		PixClk2 spread spectrum on/off toggle
+	 *
+	 * nv40 (observations from bios behaviour and mmio traces):
+	 * 	bits 4&6	as for nv30
+	 * 	bits 5&7	head dependent as for bits 4&6, but do not appear with 4&6;
+	 * 			maybe a different spread mode
+	 * 	bits 8&10	seen on dual-link dvi outputs, purpose unknown (set by POST scripts)
+	 * 	The logic behind turning spread spectrum on/off in the first place,
+	 * 	and which bit-pair to use, is unclear on nv40 (for earlier cards, the fp table
+	 * 	entry has the necessary info)
+	 */
+	if (nv_encoder->dcb->type == OUTPUT_LVDS && dev_priv->saved_reg.sel_clk & 0xf0) {
+		int shift = (dev_priv->saved_reg.sel_clk & 0x50) ? 0 : 1;
+
+		state->sel_clk &= ~0xf0;
+		state->sel_clk |= (head ? 0x40 : 0x10) << shift;
+	}
+}
+
+static void nv04_dfp_prepare(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+	struct drm_device *dev = encoder->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int head = nouveau_crtc(encoder->crtc)->index;
+	struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;
+	uint8_t *cr_lcd = &crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX];
+	uint8_t *cr_lcd_oth = &crtcstate[head ^ 1].CRTC[NV_CIO_CRE_LCD__INDEX];
+
+	helper->dpms(encoder, DRM_MODE_DPMS_OFF);
+
+	nv04_dfp_prepare_sel_clk(dev, nv_encoder, head);
+
+	/* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
+	 * at LCD__INDEX which we don't alter
+	 */
+	if (!(*cr_lcd & 0x44)) {
+		*cr_lcd = 0x3;
+
+		if (nv_two_heads(dev)) {
+			if (nv_encoder->dcb->location == DCB_LOC_ON_CHIP)
+				*cr_lcd |= head ? 0x0 : 0x8;
+			else {
+				*cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30;
+				if (nv_encoder->dcb->type == OUTPUT_LVDS)
+					*cr_lcd |= 0x30;
+				if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) {
+					/* avoid being connected to both crtcs */
+					*cr_lcd_oth &= ~0x30;
+					NVWriteVgaCrtc(dev, head ^ 1,
+						       NV_CIO_CRE_LCD__INDEX,
+						       *cr_lcd_oth);
+				}
+			}
+		}
+	}
+}
+
+
+static void nv04_dfp_mode_set(struct drm_encoder *encoder,
+			      struct drm_display_mode *mode,
+			      struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+	struct nv04_crtc_reg *savep = &dev_priv->saved_reg.crtc_reg[nv_crtc->index];
+	struct nouveau_connector *nv_connector = nouveau_crtc_connector_get(nv_crtc);
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_display_mode *output_mode = &nv_encoder->mode;
+	uint32_t mode_ratio, panel_ratio;
+
+	NV_DEBUG(dev, "Output mode on CRTC %d:\n", nv_crtc->index);
+	drm_mode_debug_printmodeline(output_mode);
+
+	/* Initialize the FP registers in this CRTC. */
+	regp->fp_horiz_regs[FP_DISPLAY_END] = output_mode->hdisplay - 1;
+	regp->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
+	if (!nv_gf4_disp_arch(dev) ||
+	    (output_mode->hsync_start - output_mode->hdisplay) >=
+					dev_priv->vbios->digital_min_front_porch)
+		regp->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay;
+	else
+		regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios->digital_min_front_porch - 1;
+	regp->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1;
+	regp->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
+	regp->fp_horiz_regs[FP_VALID_START] = output_mode->hskew;
+	regp->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - 1;
+
+	regp->fp_vert_regs[FP_DISPLAY_END] = output_mode->vdisplay - 1;
+	regp->fp_vert_regs[FP_TOTAL] = output_mode->vtotal - 1;
+	regp->fp_vert_regs[FP_CRTC] = output_mode->vtotal - 5 - 1;
+	regp->fp_vert_regs[FP_SYNC_START] = output_mode->vsync_start - 1;
+	regp->fp_vert_regs[FP_SYNC_END] = output_mode->vsync_end - 1;
+	regp->fp_vert_regs[FP_VALID_START] = 0;
+	regp->fp_vert_regs[FP_VALID_END] = output_mode->vdisplay - 1;
+
+	/* bit 26: a bit seen on some g7x, with no discernible purpose as yet */
+	regp->fp_control = NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
+			   (savep->fp_control & (1 << 26 | NV_PRAMDAC_FP_TG_CONTROL_READ_PROG));
+	/* Deal with vsync/hsync polarity */
+	/* LVDS screens do set this, but modes with +ve syncs are very rare */
+	if (output_mode->flags & DRM_MODE_FLAG_PVSYNC)
+		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS;
+	if (output_mode->flags & DRM_MODE_FLAG_PHSYNC)
+		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS;
+	/* panel scaling first, as native would get set otherwise */
+	if (nv_connector->scaling_mode == DRM_MODE_SCALE_NONE ||
+	    nv_connector->scaling_mode == DRM_MODE_SCALE_CENTER)	/* panel handles it */
+		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_CENTER;
+	else if (adjusted_mode->hdisplay == output_mode->hdisplay &&
+		 adjusted_mode->vdisplay == output_mode->vdisplay) /* native mode */
+		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE;
+	else /* gpu needs to scale */
+		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE;
+	if (nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) & NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT)
+		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12;
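+	/* 165 MHz is the single-link TMDS limit; faster modes (and dual-link
+	 * LVDS panels) need the dual-link bits set below */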
+	if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP &&
+	    output_mode->clock > 165000)
+		regp->fp_control |= (2 << 24);
+	if (nv_encoder->dcb->type == OUTPUT_LVDS) {
+		bool duallink, dummy;
+
+		nouveau_bios_parse_lvds_table(dev, nv_connector->native_mode->
+					      clock, &duallink, &dummy);
+		if (duallink)
+			regp->fp_control |= (8 << 28);
+	} else
+	if (output_mode->clock > 165000)
+		regp->fp_control |= (8 << 28);
+
+	regp->fp_debug_0 = NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND |
+			   NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND |
+			   NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR |
+			   NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR |
+			   NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED |
+			   NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE |
+			   NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE;
+
+	/* We want automatic scaling */
+	regp->fp_debug_1 = 0;
+	/* This can override HTOTAL and VTOTAL */
+	regp->fp_debug_2 = 0;
+
+	/* Use 20.12 fixed point format to avoid floats */
+	mode_ratio = (1 << 12) * adjusted_mode->hdisplay / adjusted_mode->vdisplay;
+	panel_ratio = (1 << 12) * output_mode->hdisplay / output_mode->vdisplay;
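+	/* worked example with hypothetical figures: a 1280x1024 mode on a
+	 * 1680x1050 panel gives mode_ratio = 4096*1280/1024 = 5120 (1.25)
+	 * and panel_ratio = 4096*1680/1050 = 6553 (~1.60); as mode_ratio <
+	 * panel_ratio, the image is scaled by the vertical factor and the
+	 * spare width is trimmed via FP_VALID_START/END below */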
+	/* if ratios are equal, SCALE_ASPECT will automatically (and correctly)
+	 * get treated the same as SCALE_FULLSCREEN */
+	if (nv_connector->scaling_mode == DRM_MODE_SCALE_ASPECT &&
+	    mode_ratio != panel_ratio) {
+		uint32_t diff, scale;
+		bool divide_by_2 = nv_gf4_disp_arch(dev);
+
+		if (mode_ratio < panel_ratio) {
+			/* vertical needs to expand to glass size (automatic)
+			 * horizontal needs to be scaled at vertical scale factor
+			 * to maintain aspect */
+
+			scale = (1 << 12) * adjusted_mode->vdisplay / output_mode->vdisplay;
+			regp->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE |
+					   XLATE(scale, divide_by_2, NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE);
+
+			/* restrict area of screen used, horizontally */
+			diff = output_mode->hdisplay -
+			       output_mode->vdisplay * mode_ratio / (1 << 12);
+			regp->fp_horiz_regs[FP_VALID_START] += diff / 2;
+			regp->fp_horiz_regs[FP_VALID_END] -= diff / 2;
+		}
+
+		if (mode_ratio > panel_ratio) {
+			/* horizontal needs to expand to glass size (automatic)
+			 * vertical needs to be scaled at horizontal scale factor
+			 * to maintain aspect */
+
+			scale = (1 << 12) * adjusted_mode->hdisplay / output_mode->hdisplay;
+			regp->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE |
+					   XLATE(scale, divide_by_2, NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE);
+
+			/* restrict area of screen used, vertically */
+			diff = output_mode->vdisplay -
+			       (1 << 12) * output_mode->hdisplay / mode_ratio;
+			regp->fp_vert_regs[FP_VALID_START] += diff / 2;
+			regp->fp_vert_regs[FP_VALID_END] -= diff / 2;
+		}
+	}
+
+	/* Output property. */
+	if (nv_connector->use_dithering) {
+		if (dev_priv->chipset == 0x11)
+			regp->dither = savep->dither | 0x00010000;
+		else {
+			int i;
+			regp->dither = savep->dither | 0x00000001;
+			for (i = 0; i < 3; i++) {
+				regp->dither_regs[i] = 0xe4e4e4e4;
+				regp->dither_regs[i + 3] = 0x44444444;
+			}
+		}
+	} else {
+		if (dev_priv->chipset != 0x11) {
+			/* reset them */
+			int i;
+			for (i = 0; i < 3; i++) {
+				regp->dither_regs[i] = savep->dither_regs[i];
+				regp->dither_regs[i + 3] = savep->dither_regs[i + 3];
+			}
+		}
+		regp->dither = savep->dither;
+	}
+
+	regp->fp_margin_color = 0;
+}
+
+static void nv04_dfp_commit(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct dcb_entry *dcbe = nv_encoder->dcb;
+	int head = nouveau_crtc(encoder->crtc)->index;
+
+	NV_TRACE(dev, "%s called for encoder %d\n", __func__, nv_encoder->dcb->index);
+
+	if (dcbe->type == OUTPUT_TMDS)
+		run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
+	else if (dcbe->type == OUTPUT_LVDS)
+		call_lvds_script(dev, dcbe, head, LVDS_RESET, nv_encoder->mode.clock);
+
+	/* update fp_control state for any changes made by scripts,
+	 * so correct value is written at DPMS on */
+	dev_priv->mode_reg.crtc_reg[head].fp_control =
+		NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
+
+	/* This could use refinement for flatpanels, but it should work this way */
+	if (dev_priv->chipset < 0x44)
+		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
+	else
+		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
+
+	helper->dpms(encoder, DRM_MODE_DPMS_ON);
+
+	NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
+		drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+		nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+}
+
+static inline bool is_powersaving_dpms(int mode)
+{
+	return (mode != DRM_MODE_DPMS_ON);
+}
+
+static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_crtc *crtc = encoder->crtc;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	bool was_powersaving = is_powersaving_dpms(nv_encoder->last_dpms);
+
+	if (nv_encoder->last_dpms == mode)
+		return;
+	nv_encoder->last_dpms = mode;
+
+	NV_INFO(dev, "Setting dpms mode %d on lvds encoder (output %d)\n",
+		     mode, nv_encoder->dcb->index);
+
+	if (was_powersaving && is_powersaving_dpms(mode))
+		return;
+
+	if (nv_encoder->dcb->lvdsconf.use_power_scripts) {
+		struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
+
+		/* when removing an output, crtc may not be set, but PANEL_OFF
+		 * must still be run
+		 */
+		int head = crtc ? nouveau_crtc(crtc)->index :
+			   nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
+
+		if (mode == DRM_MODE_DPMS_ON) {
+			if (!nv_connector->native_mode) {
+				NV_ERROR(dev, "Not turning on LVDS without native mode\n");
+				return;
+			}
+			call_lvds_script(dev, nv_encoder->dcb, head,
+					 LVDS_PANEL_ON, nv_connector->native_mode->clock);
+		} else
+			/* pxclk of 0 is fine for PANEL_OFF, and for a
+			 * disconnected LVDS encoder there is no native_mode
+			 */
+			call_lvds_script(dev, nv_encoder->dcb, head,
+					 LVDS_PANEL_OFF, 0);
+	}
+
+	nv04_dfp_update_fp_control(encoder, mode);
+
+	if (mode == DRM_MODE_DPMS_ON)
+		nv04_dfp_prepare_sel_clk(dev, nv_encoder, nouveau_crtc(crtc)->index);
+	else {
+		dev_priv->mode_reg.sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
+		dev_priv->mode_reg.sel_clk &= ~0xf0;
+	}
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, dev_priv->mode_reg.sel_clk);
+}
+
+static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+	if (nv_encoder->last_dpms == mode)
+		return;
+	nv_encoder->last_dpms = mode;
+
+	NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n",
+		     mode, nv_encoder->dcb->index);
+
+	nv04_dfp_update_fp_control(encoder, mode);
+}
+
+static void nv04_dfp_save(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+
+	if (nv_two_heads(dev))
+		nv_encoder->restore.head =
+			nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
+}
+
+static void nv04_dfp_restore(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int head = nv_encoder->restore.head;
+
+	if (nv_encoder->dcb->type == OUTPUT_LVDS) {
+		struct drm_display_mode *native_mode = nouveau_encoder_connector_get(nv_encoder)->native_mode;
+		if (native_mode)
+			call_lvds_script(dev, nv_encoder->dcb, head, LVDS_PANEL_ON,
+					 native_mode->clock);
+		else
+			NV_ERROR(dev, "Not restoring LVDS without native mode\n");
+
+	} else if (nv_encoder->dcb->type == OUTPUT_TMDS) {
+		int clock = nouveau_hw_pllvals_to_clk
+					(&dev_priv->saved_reg.crtc_reg[head].pllvals);
+
+		run_tmds_table(dev, nv_encoder->dcb, head, clock);
+	}
+
+	nv_encoder->last_dpms = NV_DPMS_CLEARED;
+}
+
+static void nv04_dfp_destroy(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+	NV_DEBUG(encoder->dev, "\n");
+
+	drm_encoder_cleanup(encoder);
+	kfree(nv_encoder);
+}
+
+static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
+	.dpms = nv04_lvds_dpms,
+	.save = nv04_dfp_save,
+	.restore = nv04_dfp_restore,
+	.mode_fixup = nv04_dfp_mode_fixup,
+	.prepare = nv04_dfp_prepare,
+	.commit = nv04_dfp_commit,
+	.mode_set = nv04_dfp_mode_set,
+	.detect = NULL,
+};
+
+static const struct drm_encoder_helper_funcs nv04_tmds_helper_funcs = {
+	.dpms = nv04_tmds_dpms,
+	.save = nv04_dfp_save,
+	.restore = nv04_dfp_restore,
+	.mode_fixup = nv04_dfp_mode_fixup,
+	.prepare = nv04_dfp_prepare,
+	.commit = nv04_dfp_commit,
+	.mode_set = nv04_dfp_mode_set,
+	.detect = NULL,
+};
+
+static const struct drm_encoder_funcs nv04_dfp_funcs = {
+	.destroy = nv04_dfp_destroy,
+};
+
+int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry)
+{
+	const struct drm_encoder_helper_funcs *helper;
+	struct drm_encoder *encoder;
+	struct nouveau_encoder *nv_encoder = NULL;
+	int type;
+
+	switch (entry->type) {
+	case OUTPUT_TMDS:
+		type = DRM_MODE_ENCODER_TMDS;
+		helper = &nv04_tmds_helper_funcs;
+		break;
+	case OUTPUT_LVDS:
+		type = DRM_MODE_ENCODER_LVDS;
+		helper = &nv04_lvds_helper_funcs;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+	if (!nv_encoder)
+		return -ENOMEM;
+
+	encoder = to_drm_encoder(nv_encoder);
+
+	nv_encoder->dcb = entry;
+	nv_encoder->or = ffs(entry->or) - 1;
+
+	drm_encoder_init(dev, encoder, &nv04_dfp_funcs, type);
+	drm_encoder_helper_add(encoder, helper);
+
+	encoder->possible_crtcs = entry->heads;
+	encoder->possible_clones = 0;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
new file mode 100644
index 0000000..b47c757
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_fb.h"
+#include "nouveau_hw.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+
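+/* non-zero iff more than one encoder type bit is set: e & (e - 1) clears
+ * the lowest set bit, leaving a remainder only if another bit was set */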
+#define MULTIPLE_ENCODERS(e) (e & (e - 1))
+
+static void
+nv04_display_store_initial_head_owner(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->chipset != 0x11) {
+		dev_priv->crtc_owner = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44);
+		goto ownerknown;
+	}
+
+	/* reading CR44 is broken on nv11, so we attempt to infer it */
+	if (nvReadMC(dev, NV_PBUS_DEBUG_1) & (1 << 28))	/* heads tied, restore both */
+		dev_priv->crtc_owner = 0x4;
+	else {
+		uint8_t slaved_on_A, slaved_on_B;
+		bool tvA = false;
+		bool tvB = false;
+
+		NVLockVgaCrtcs(dev, false);
+
+		slaved_on_B = NVReadVgaCrtc(dev, 1, NV_CIO_CRE_PIXEL_INDEX) &
+									0x80;
+		if (slaved_on_B)
+			tvB = !(NVReadVgaCrtc(dev, 1, NV_CIO_CRE_LCD__INDEX) &
+					MASK(NV_CIO_CRE_LCD_LCD_SELECT));
+
+		slaved_on_A = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX) &
+									0x80;
+		if (slaved_on_A)
+			tvA = !(NVReadVgaCrtc(dev, 0, NV_CIO_CRE_LCD__INDEX) &
+					MASK(NV_CIO_CRE_LCD_LCD_SELECT));
+
+		NVLockVgaCrtcs(dev, true);
+
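+		/* prefer a head slaved to a non-TV output; failing that, take
+		 * any slaved head, and default to head A otherwise */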
+		if (slaved_on_A && !tvA)
+			dev_priv->crtc_owner = 0x0;
+		else if (slaved_on_B && !tvB)
+			dev_priv->crtc_owner = 0x3;
+		else if (slaved_on_A)
+			dev_priv->crtc_owner = 0x0;
+		else if (slaved_on_B)
+			dev_priv->crtc_owner = 0x3;
+		else
+			dev_priv->crtc_owner = 0x0;
+	}
+
+ownerknown:
+	NV_INFO(dev, "Initial CRTC_OWNER is %d\n", dev_priv->crtc_owner);
+
+	/* we need to ensure the heads are not tied henceforth, or reading any
+	 * 8 bit reg on head B will fail;
+	 * setting a single arbitrary head solves that */
+	NVSetOwner(dev, 0);
+}
+
+int
+nv04_display_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct parsed_dcb *dcb = dev_priv->vbios->dcb;
+	struct drm_encoder *encoder;
+	struct drm_crtc *crtc;
+	uint16_t connector[16] = { 0 };
+	int i, ret;
+
+	NV_DEBUG(dev, "\n");
+
+	if (nv_two_heads(dev))
+		nv04_display_store_initial_head_owner(dev);
+
+	drm_mode_config_init(dev);
+	drm_mode_create_scaling_mode_property(dev);
+	drm_mode_create_dithering_property(dev);
+
+	dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
+
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+	switch (dev_priv->card_type) {
+	case NV_04:
+		dev->mode_config.max_width = 2048;
+		dev->mode_config.max_height = 2048;
+		break;
+	default:
+		dev->mode_config.max_width = 4096;
+		dev->mode_config.max_height = 4096;
+		break;
+	}
+
+	dev->mode_config.fb_base = dev_priv->fb_phys;
+
+	nv04_crtc_create(dev, 0);
+	if (nv_two_heads(dev))
+		nv04_crtc_create(dev, 1);
+
+	for (i = 0; i < dcb->entries; i++) {
+		struct dcb_entry *dcbent = &dcb->entry[i];
+
+		switch (dcbent->type) {
+		case OUTPUT_ANALOG:
+			ret = nv04_dac_create(dev, dcbent);
+			break;
+		case OUTPUT_LVDS:
+		case OUTPUT_TMDS:
+			ret = nv04_dfp_create(dev, dcbent);
+			break;
+		case OUTPUT_TV:
+			if (dcbent->location == DCB_LOC_ON_CHIP)
+				ret = nv17_tv_create(dev, dcbent);
+			else
+				ret = nv04_tv_create(dev, dcbent);
+			break;
+		default:
+			NV_WARN(dev, "DCB type %d not known\n", dcbent->type);
+			continue;
+		}
+
+		if (ret)
+			continue;
+
+		connector[dcbent->connector] |= (1 << dcbent->type);
+	}
+
+	for (i = 0; i < dcb->entries; i++) {
+		struct dcb_entry *dcbent = &dcb->entry[i];
+		uint16_t encoders;
+		int type;
+
+		encoders = connector[dcbent->connector];
+		if (!(encoders & (1 << dcbent->type)))
+			continue;
+		connector[dcbent->connector] = 0;
+
+		switch (dcbent->type) {
+		case OUTPUT_ANALOG:
+			if (!MULTIPLE_ENCODERS(encoders))
+				type = DRM_MODE_CONNECTOR_VGA;
+			else
+				type = DRM_MODE_CONNECTOR_DVII;
+			break;
+		case OUTPUT_TMDS:
+			if (!MULTIPLE_ENCODERS(encoders))
+				type = DRM_MODE_CONNECTOR_DVID;
+			else
+				type = DRM_MODE_CONNECTOR_DVII;
+			break;
+		case OUTPUT_LVDS:
+			type = DRM_MODE_CONNECTOR_LVDS;
+#if 0
+			/* don't create i2c adapter when lvds ddc not allowed */
+			if (dcbent->lvdsconf.use_straps_for_mode ||
+			    dev_priv->vbios->fp_no_ddc)
+				i2c_index = 0xf;
+#endif
+			break;
+		case OUTPUT_TV:
+			type = DRM_MODE_CONNECTOR_TV;
+			break;
+		default:
+			type = DRM_MODE_CONNECTOR_Unknown;
+			continue;
+		}
+
+		nouveau_connector_create(dev, dcbent->connector, type);
+	}
+
+	/* Save previous state */
+	NVLockVgaCrtcs(dev, false);
+
+	nouveau_hw_save_vga_fonts(dev, 1);
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		crtc->funcs->save(crtc);
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct drm_encoder_helper_funcs *func = encoder->helper_private;
+
+		func->save(encoder);
+	}
+
+	return 0;
+}
+
+void
+nv04_display_destroy(struct drm_device *dev)
+{
+	struct drm_encoder *encoder;
+	struct drm_crtc *crtc;
+
+	NV_DEBUG(dev, "\n");
+
+	/* Turn every CRTC off. */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct drm_mode_set modeset = {
+			.crtc = crtc,
+		};
+
+		crtc->funcs->set_config(&modeset);
+	}
+
+	/* Restore state */
+	NVLockVgaCrtcs(dev, false);
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct drm_encoder_helper_funcs *func = encoder->helper_private;
+
+		func->restore(encoder);
+	}
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		crtc->funcs->restore(crtc);
+
+	nouveau_hw_save_vga_fonts(dev, 0);
+
+	drm_mode_config_cleanup(dev);
+}
+
+void
+nv04_display_restore(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_encoder *encoder;
+	struct drm_crtc *crtc;
+
+	NVLockVgaCrtcs(dev, false);
+
+	/* meh.. modeset apparently doesn't set up all the regs and depends
+	 * on pre-existing state; for now load the state of the card *before*
+	 * nouveau was loaded, and then do a modeset.
+	 *
+	 * the best thing to do is probably to make the save/restore routines
+	 * not save/restore "pre-load" state, but be more general so we can
+	 * save on suspend too.
+	 */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct drm_encoder_helper_funcs *func = encoder->helper_private;
+
+		func->restore(encoder);
+	}
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		crtc->funcs->restore(crtc);
+
+	if (nv_two_heads(dev)) {
+		NV_INFO(dev, "Restoring CRTC_OWNER to %d.\n",
+			dev_priv->crtc_owner);
+		NVSetOwner(dev, dev_priv->crtc_owner);
+	}
+
+	NVLockVgaCrtcs(dev, true);
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv04_fb.c b/drivers/gpu/drm/nouveau/nv04_fb.c
new file mode 100644
index 0000000..638cf60
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_fb.c
@@ -0,0 +1,21 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv04_fb_init(struct drm_device *dev)
+{
+	/* This is what the DDX did for NV_ARCH_04, but an mmio-trace shows
+	 * nvidia reading PFB_CFG_0, then writing back its original value.
+	 * (which was 0x701114 in this case)
+	 */
+
+	nv_wr32(dev, NV04_PFB_CFG0, 0x1114);
+	return 0;
+}
+
+void
+nv04_fb_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
new file mode 100644
index 0000000..09a3107
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -0,0 +1,316 @@
+/*
+ * Copyright 2009 Ben Skeggs
+ * Copyright 2008 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_fbcon.h"
+
+static void
+nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
+{
+	struct nouveau_fbcon_par *par = info->par;
+	struct drm_device *dev = par->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->channel;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return;
+
+	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) {
+		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+		info->flags |= FBINFO_HWACCEL_DISABLED;
+	}
+
+	if (info->flags & FBINFO_HWACCEL_DISABLED) {
+		cfb_copyarea(info, region);
+		return;
+	}
+
+	BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3);
+	OUT_RING(chan, (region->sy << 16) | region->sx);
+	OUT_RING(chan, (region->dy << 16) | region->dx);
+	OUT_RING(chan, (region->height << 16) | region->width);
+	FIRE_RING(chan);
+}
+
+static void
+nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+	struct nouveau_fbcon_par *par = info->par;
+	struct drm_device *dev = par->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->channel;
+	uint32_t color = ((uint32_t *) info->pseudo_palette)[rect->color];
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return;
+
+	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) {
+		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+		info->flags |= FBINFO_HWACCEL_DISABLED;
+	}
+
+	if (info->flags & FBINFO_HWACCEL_DISABLED) {
+		cfb_fillrect(info, rect);
+		return;
+	}
+
+	BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
+	OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
+	BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1);
+	OUT_RING(chan, color);
+	BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2);
+	OUT_RING(chan, (rect->dx << 16) | rect->dy);
+	OUT_RING(chan, (rect->width << 16) | rect->height);
+	FIRE_RING(chan);
+}
+
+static void
+nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+	struct nouveau_fbcon_par *par = info->par;
+	struct drm_device *dev = par->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->channel;
+	uint32_t fg;
+	uint32_t bg;
+	uint32_t dsize;
+	uint32_t width;
+	uint32_t *data = (uint32_t *)image->data;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return;
+
+	if (image->depth != 1) {
+		cfb_imageblit(info, image);
+		return;
+	}
+
+	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) {
+		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+		info->flags |= FBINFO_HWACCEL_DISABLED;
+	}
+
+	if (info->flags & FBINFO_HWACCEL_DISABLED) {
+		cfb_imageblit(info, image);
+		return;
+	}
+
+	width = (image->width + 31) & ~31;
+	dsize = (width * image->height) >> 5;
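+	/* the 1bpp source is padded to a 32-pixel multiple, making dsize the
+	 * bitmap length in 32-bit words */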
+
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+		fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
+		bg = ((uint32_t *) info->pseudo_palette)[image->bg_color];
+	} else {
+		fg = image->fg_color;
+		bg = image->bg_color;
+	}
+
+	BEGIN_RING(chan, NvSubGdiRect, 0x0be4, 7);
+	OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
+	OUT_RING(chan, ((image->dy + image->height) << 16) |
+			 ((image->dx + image->width) & 0xffff));
+	OUT_RING(chan, bg);
+	OUT_RING(chan, fg);
+	OUT_RING(chan, (image->height << 16) | image->width);
+	OUT_RING(chan, (image->height << 16) | width);
+	OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
+
+	while (dsize) {
+		int iter_len = dsize > 128 ? 128 : dsize;
+
+		if (RING_SPACE(chan, iter_len + 1)) {
+			NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+			info->flags |= FBINFO_HWACCEL_DISABLED;
+			cfb_imageblit(info, image);
+			return;
+		}
+
+		BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len);
+		OUT_RINGp(chan, data, iter_len);
+		data += iter_len;
+		dsize -= iter_len;
+	}
+
+	FIRE_RING(chan);
+}
+
+static int
+nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *obj = NULL;
+	int ret;
+
+	ret = nouveau_gpuobj_gr_new(dev_priv->channel, class, &obj);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, handle, obj, NULL);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int
+nv04_fbcon_accel_init(struct fb_info *info)
+{
+	struct nouveau_fbcon_par *par = info->par;
+	struct drm_device *dev = par->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->channel;
+	int surface_fmt, pattern_fmt, rect_fmt;
+	int ret;
+
+	switch (info->var.bits_per_pixel) {
+	case 8:
+		surface_fmt = 1;
+		pattern_fmt = 3;
+		rect_fmt = 3;
+		break;
+	case 16:
+		surface_fmt = 4;
+		pattern_fmt = 1;
+		rect_fmt = 1;
+		break;
+	case 32:
+		switch (info->var.transp.length) {
+		case 0: /* depth 24 */
+		case 8: /* depth 32 */
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		surface_fmt = 6;
+		pattern_fmt = 3;
+		rect_fmt = 3;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
+				   0x0062 : 0x0042, NvCtxSurf2D);
+	if (ret)
+		return ret;
+
+	ret = nv04_fbcon_grobj_new(dev, 0x0019, NvClipRect);
+	if (ret)
+		return ret;
+
+	ret = nv04_fbcon_grobj_new(dev, 0x0043, NvRop);
+	if (ret)
+		return ret;
+
+	ret = nv04_fbcon_grobj_new(dev, 0x0044, NvImagePatt);
+	if (ret)
+		return ret;
+
+	ret = nv04_fbcon_grobj_new(dev, 0x004a, NvGdiRect);
+	if (ret)
+		return ret;
+
+	ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
+				   0x009f : 0x005f, NvImageBlit);
+	if (ret)
+		return ret;
+
+	if (RING_SPACE(chan, 49)) {
+		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+		info->flags |= FBINFO_HWACCEL_DISABLED;
+		return 0;
+	}
+
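+	/* bind the 2D objects to subchannels and program their static state:
+	 * surface format and pitch, rop, pattern, and a clip covering the
+	 * whole virtual screen */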
+	BEGIN_RING(chan, 1, 0x0000, 1);
+	OUT_RING(chan, NvCtxSurf2D);
+	BEGIN_RING(chan, 1, 0x0184, 2);
+	OUT_RING(chan, NvDmaFB);
+	OUT_RING(chan, NvDmaFB);
+	BEGIN_RING(chan, 1, 0x0300, 4);
+	OUT_RING(chan, surface_fmt);
+	OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
+	OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
+	OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
+
+	BEGIN_RING(chan, 1, 0x0000, 1);
+	OUT_RING(chan, NvRop);
+	BEGIN_RING(chan, 1, 0x0300, 1);
+	OUT_RING(chan, 0x55);
+
+	BEGIN_RING(chan, 1, 0x0000, 1);
+	OUT_RING(chan, NvImagePatt);
+	BEGIN_RING(chan, 1, 0x0300, 8);
+	OUT_RING(chan, pattern_fmt);
+#ifdef __BIG_ENDIAN
+	OUT_RING(chan, 2);
+#else
+	OUT_RING(chan, 1);
+#endif
+	OUT_RING(chan, 0);
+	OUT_RING(chan, 1);
+	OUT_RING(chan, ~0);
+	OUT_RING(chan, ~0);
+	OUT_RING(chan, ~0);
+	OUT_RING(chan, ~0);
+
+	BEGIN_RING(chan, 1, 0x0000, 1);
+	OUT_RING(chan, NvClipRect);
+	BEGIN_RING(chan, 1, 0x0300, 2);
+	OUT_RING(chan, 0);
+	OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
+
+	BEGIN_RING(chan, NvSubImageBlit, 0x0000, 1);
+	OUT_RING(chan, NvImageBlit);
+	BEGIN_RING(chan, NvSubImageBlit, 0x019c, 1);
+	OUT_RING(chan, NvCtxSurf2D);
+	BEGIN_RING(chan, NvSubImageBlit, 0x02fc, 1);
+	OUT_RING(chan, 3);
+
+	BEGIN_RING(chan, NvSubGdiRect, 0x0000, 1);
+	OUT_RING(chan, NvGdiRect);
+	BEGIN_RING(chan, NvSubGdiRect, 0x0198, 1);
+	OUT_RING(chan, NvCtxSurf2D);
+	BEGIN_RING(chan, NvSubGdiRect, 0x0188, 2);
+	OUT_RING(chan, NvImagePatt);
+	OUT_RING(chan, NvRop);
+	BEGIN_RING(chan, NvSubGdiRect, 0x0304, 1);
+	OUT_RING(chan, 1);
+	BEGIN_RING(chan, NvSubGdiRect, 0x0300, 1);
+	OUT_RING(chan, rect_fmt);
+	BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
+	OUT_RING(chan, 3);
+
+	FIRE_RING(chan);
+
+	info->fbops->fb_fillrect = nv04_fbcon_fillrect;
+	info->fbops->fb_copyarea = nv04_fbcon_copyarea;
+	info->fbops->fb_imageblit = nv04_fbcon_imageblit;
+	return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
new file mode 100644
index 0000000..0c3cd53
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+#define NV04_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV04_RAMFC__SIZE))
+#define NV04_RAMFC__SIZE 32
+#define NV04_RAMFC_DMA_PUT                                       0x00
+#define NV04_RAMFC_DMA_GET                                       0x04
+#define NV04_RAMFC_DMA_INSTANCE                                  0x08
+#define NV04_RAMFC_DMA_STATE                                     0x0C
+#define NV04_RAMFC_DMA_FETCH                                     0x10
+#define NV04_RAMFC_ENGINE                                        0x14
+#define NV04_RAMFC_PULL1_ENGINE                                  0x18
+
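+/* Helpers to read/write 32-bit words within this channel's RAMFC entry. */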
+#define RAMFC_WR(offset, val) nv_wo32(dev, chan->ramfc->gpuobj, \
+					 NV04_RAMFC_##offset/4, (val))
+#define RAMFC_RD(offset)      nv_ro32(dev, chan->ramfc->gpuobj, \
+					 NV04_RAMFC_##offset/4)
+
+void
+nv04_fifo_disable(struct drm_device *dev)
+{
+	uint32_t tmp;
+
+	tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH);
+	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, tmp & ~1);
+	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
+	tmp = nv_rd32(dev, NV03_PFIFO_CACHE1_PULL1);
+	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, tmp & ~1);
+}
+
+void
+nv04_fifo_enable(struct drm_device *dev)
+{
+	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
+	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+}
+
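+/* Enable or disable PFIFO context switching; returns whether it was
+ * previously enabled so the caller can restore the old state.
+ */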
+bool
+nv04_fifo_reassign(struct drm_device *dev, bool enable)
+{
+	uint32_t reassign = nv_rd32(dev, NV03_PFIFO_CACHES);
+
+	nv_wr32(dev, NV03_PFIFO_CACHES, enable ? 1 : 0);
+	return (reassign == 1);
+}
+
+int
+nv04_fifo_channel_id(struct drm_device *dev)
+{
+	return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
+			NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
+}
+
+int
+nv04_fifo_create_context(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
+						NV04_RAMFC__SIZE,
+						NVOBJ_FLAG_ZERO_ALLOC |
+						NVOBJ_FLAG_ZERO_FREE,
+						NULL, &chan->ramfc);
+	if (ret)
+		return ret;
+
+	/* Setup initial state */
+	dev_priv->engine.instmem.prepare_access(dev, true);
+	RAMFC_WR(DMA_PUT, chan->pushbuf_base);
+	RAMFC_WR(DMA_GET, chan->pushbuf_base);
+	RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4);
+	RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
+#ifdef __BIG_ENDIAN
+			     NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+			     0));
+	dev_priv->engine.instmem.finish_access(dev);
+
+	/* enable the fifo dma operation */
+	nv_wr32(dev, NV04_PFIFO_MODE,
+		nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+	return 0;
+}
+
+void
+nv04_fifo_destroy_context(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+
+	nv_wr32(dev, NV04_PFIFO_MODE,
+		nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
+
+	nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+}
+
+static void
+nv04_fifo_do_load_context(struct drm_device *dev, int chid)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t fc = NV04_RAMFC(chid), tmp;
+
+	dev_priv->engine.instmem.prepare_access(dev, false);
+
+	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
+	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
+	tmp = nv_ri32(dev, fc + 8);
+	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
+	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
+	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 12));
+	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 16));
+	nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 20));
+	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 24));
+
+	dev_priv->engine.instmem.finish_access(dev);
+
+	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
+	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
+}
+
+int
+nv04_fifo_load_context(struct nouveau_channel *chan)
+{
+	uint32_t tmp;
+
+	nv_wr32(chan->dev, NV03_PFIFO_CACHE1_PUSH1,
+			   NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
+	nv04_fifo_do_load_context(chan->dev, chan->id);
+	nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
+
+	/* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
+	tmp = nv_rd32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
+	nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
+
+	return 0;
+}
+
+int
+nv04_fifo_unload_context(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nouveau_channel *chan = NULL;
+	uint32_t tmp;
+	int chid;
+
+	chid = pfifo->channel_id(dev);
+	if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
+		return 0;
+
+	chan = dev_priv->fifos[chid];
+	if (!chan) {
+		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
+		return -EINVAL;
+	}
+
+	dev_priv->engine.instmem.prepare_access(dev, true);
+	RAMFC_WR(DMA_PUT, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
+	RAMFC_WR(DMA_GET, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
+	tmp  = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16;
+	tmp |= nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE);
+	RAMFC_WR(DMA_INSTANCE, tmp);
+	RAMFC_WR(DMA_STATE, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
+	RAMFC_WR(DMA_FETCH, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
+	RAMFC_WR(ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
+	RAMFC_WR(PULL1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
+	dev_priv->engine.instmem.finish_access(dev);
+
+	nv04_fifo_do_load_context(dev, pfifo->channels - 1);
+	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
+	return 0;
+}
+
+static void
+nv04_fifo_init_reset(struct drm_device *dev)
+{
+	nv_wr32(dev, NV03_PMC_ENABLE,
+		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
+	nv_wr32(dev, NV03_PMC_ENABLE,
+		nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PFIFO);
+
+	nv_wr32(dev, 0x003224, 0x000f0078);
+	nv_wr32(dev, 0x002044, 0x0101ffff);
+	nv_wr32(dev, 0x002040, 0x000000ff);
+	nv_wr32(dev, 0x002500, 0x00000000);
+	nv_wr32(dev, 0x003000, 0x00000000);
+	nv_wr32(dev, 0x003050, 0x00000000);
+	nv_wr32(dev, 0x003200, 0x00000000);
+	nv_wr32(dev, 0x003250, 0x00000000);
+	nv_wr32(dev, 0x003220, 0x00000000);
+
+	nv_wr32(dev, 0x003250, 0x00000000);
+	nv_wr32(dev, 0x003270, 0x00000000);
+	nv_wr32(dev, 0x003210, 0x00000000);
+}
+
+static void
+nv04_fifo_init_ramxx(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+				       ((dev_priv->ramht_bits - 9) << 16) |
+				       (dev_priv->ramht_offset >> 8));
+	nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset >> 8);
+	nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8);
+}
+
+static void
+nv04_fifo_init_intr(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x002100, 0xffffffff);
+	nv_wr32(dev, 0x002140, 0xffffffff);
+}
+
+int
+nv04_fifo_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	int i;
+
+	nv04_fifo_init_reset(dev);
+	nv04_fifo_init_ramxx(dev);
+
+	nv04_fifo_do_load_context(dev, pfifo->channels - 1);
+	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
+
+	nv04_fifo_init_intr(dev);
+	pfifo->enable(dev);
+
+	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+		if (dev_priv->fifos[i]) {
+			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
+			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
+		}
+	}
+
+	return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
new file mode 100644
index 0000000..396ee92
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -0,0 +1,579 @@
+/*
+ * Copyright 2007 Stephane Marchesin
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drm.h"
+#include "nouveau_drv.h"
+
+static uint32_t nv04_graph_ctx_regs[] = {
+	NV04_PGRAPH_CTX_SWITCH1,
+	NV04_PGRAPH_CTX_SWITCH2,
+	NV04_PGRAPH_CTX_SWITCH3,
+	NV04_PGRAPH_CTX_SWITCH4,
+	NV04_PGRAPH_CTX_CACHE1,
+	NV04_PGRAPH_CTX_CACHE2,
+	NV04_PGRAPH_CTX_CACHE3,
+	NV04_PGRAPH_CTX_CACHE4,
+	0x00400184,
+	0x004001a4,
+	0x004001c4,
+	0x004001e4,
+	0x00400188,
+	0x004001a8,
+	0x004001c8,
+	0x004001e8,
+	0x0040018c,
+	0x004001ac,
+	0x004001cc,
+	0x004001ec,
+	0x00400190,
+	0x004001b0,
+	0x004001d0,
+	0x004001f0,
+	0x00400194,
+	0x004001b4,
+	0x004001d4,
+	0x004001f4,
+	0x00400198,
+	0x004001b8,
+	0x004001d8,
+	0x004001f8,
+	0x0040019c,
+	0x004001bc,
+	0x004001dc,
+	0x004001fc,
+	0x00400174,
+	NV04_PGRAPH_DMA_START_0,
+	NV04_PGRAPH_DMA_START_1,
+	NV04_PGRAPH_DMA_LENGTH,
+	NV04_PGRAPH_DMA_MISC,
+	NV04_PGRAPH_DMA_PITCH,
+	NV04_PGRAPH_BOFFSET0,
+	NV04_PGRAPH_BBASE0,
+	NV04_PGRAPH_BLIMIT0,
+	NV04_PGRAPH_BOFFSET1,
+	NV04_PGRAPH_BBASE1,
+	NV04_PGRAPH_BLIMIT1,
+	NV04_PGRAPH_BOFFSET2,
+	NV04_PGRAPH_BBASE2,
+	NV04_PGRAPH_BLIMIT2,
+	NV04_PGRAPH_BOFFSET3,
+	NV04_PGRAPH_BBASE3,
+	NV04_PGRAPH_BLIMIT3,
+	NV04_PGRAPH_BOFFSET4,
+	NV04_PGRAPH_BBASE4,
+	NV04_PGRAPH_BLIMIT4,
+	NV04_PGRAPH_BOFFSET5,
+	NV04_PGRAPH_BBASE5,
+	NV04_PGRAPH_BLIMIT5,
+	NV04_PGRAPH_BPITCH0,
+	NV04_PGRAPH_BPITCH1,
+	NV04_PGRAPH_BPITCH2,
+	NV04_PGRAPH_BPITCH3,
+	NV04_PGRAPH_BPITCH4,
+	NV04_PGRAPH_SURFACE,
+	NV04_PGRAPH_STATE,
+	NV04_PGRAPH_BSWIZZLE2,
+	NV04_PGRAPH_BSWIZZLE5,
+	NV04_PGRAPH_BPIXEL,
+	NV04_PGRAPH_NOTIFY,
+	NV04_PGRAPH_PATT_COLOR0,
+	NV04_PGRAPH_PATT_COLOR1,
+	NV04_PGRAPH_PATT_COLORRAM+0x00,
+	NV04_PGRAPH_PATT_COLORRAM+0x01,
+	NV04_PGRAPH_PATT_COLORRAM+0x02,
+	NV04_PGRAPH_PATT_COLORRAM+0x03,
+	NV04_PGRAPH_PATT_COLORRAM+0x04,
+	NV04_PGRAPH_PATT_COLORRAM+0x05,
+	NV04_PGRAPH_PATT_COLORRAM+0x06,
+	NV04_PGRAPH_PATT_COLORRAM+0x07,
+	NV04_PGRAPH_PATT_COLORRAM+0x08,
+	NV04_PGRAPH_PATT_COLORRAM+0x09,
+	NV04_PGRAPH_PATT_COLORRAM+0x0A,
+	NV04_PGRAPH_PATT_COLORRAM+0x0B,
+	NV04_PGRAPH_PATT_COLORRAM+0x0C,
+	NV04_PGRAPH_PATT_COLORRAM+0x0D,
+	NV04_PGRAPH_PATT_COLORRAM+0x0E,
+	NV04_PGRAPH_PATT_COLORRAM+0x0F,
+	NV04_PGRAPH_PATT_COLORRAM+0x10,
+	NV04_PGRAPH_PATT_COLORRAM+0x11,
+	NV04_PGRAPH_PATT_COLORRAM+0x12,
+	NV04_PGRAPH_PATT_COLORRAM+0x13,
+	NV04_PGRAPH_PATT_COLORRAM+0x14,
+	NV04_PGRAPH_PATT_COLORRAM+0x15,
+	NV04_PGRAPH_PATT_COLORRAM+0x16,
+	NV04_PGRAPH_PATT_COLORRAM+0x17,
+	NV04_PGRAPH_PATT_COLORRAM+0x18,
+	NV04_PGRAPH_PATT_COLORRAM+0x19,
+	NV04_PGRAPH_PATT_COLORRAM+0x1A,
+	NV04_PGRAPH_PATT_COLORRAM+0x1B,
+	NV04_PGRAPH_PATT_COLORRAM+0x1C,
+	NV04_PGRAPH_PATT_COLORRAM+0x1D,
+	NV04_PGRAPH_PATT_COLORRAM+0x1E,
+	NV04_PGRAPH_PATT_COLORRAM+0x1F,
+	NV04_PGRAPH_PATT_COLORRAM+0x20,
+	NV04_PGRAPH_PATT_COLORRAM+0x21,
+	NV04_PGRAPH_PATT_COLORRAM+0x22,
+	NV04_PGRAPH_PATT_COLORRAM+0x23,
+	NV04_PGRAPH_PATT_COLORRAM+0x24,
+	NV04_PGRAPH_PATT_COLORRAM+0x25,
+	NV04_PGRAPH_PATT_COLORRAM+0x26,
+	NV04_PGRAPH_PATT_COLORRAM+0x27,
+	NV04_PGRAPH_PATT_COLORRAM+0x28,
+	NV04_PGRAPH_PATT_COLORRAM+0x29,
+	NV04_PGRAPH_PATT_COLORRAM+0x2A,
+	NV04_PGRAPH_PATT_COLORRAM+0x2B,
+	NV04_PGRAPH_PATT_COLORRAM+0x2C,
+	NV04_PGRAPH_PATT_COLORRAM+0x2D,
+	NV04_PGRAPH_PATT_COLORRAM+0x2E,
+	NV04_PGRAPH_PATT_COLORRAM+0x2F,
+	NV04_PGRAPH_PATT_COLORRAM+0x30,
+	NV04_PGRAPH_PATT_COLORRAM+0x31,
+	NV04_PGRAPH_PATT_COLORRAM+0x32,
+	NV04_PGRAPH_PATT_COLORRAM+0x33,
+	NV04_PGRAPH_PATT_COLORRAM+0x34,
+	NV04_PGRAPH_PATT_COLORRAM+0x35,
+	NV04_PGRAPH_PATT_COLORRAM+0x36,
+	NV04_PGRAPH_PATT_COLORRAM+0x37,
+	NV04_PGRAPH_PATT_COLORRAM+0x38,
+	NV04_PGRAPH_PATT_COLORRAM+0x39,
+	NV04_PGRAPH_PATT_COLORRAM+0x3A,
+	NV04_PGRAPH_PATT_COLORRAM+0x3B,
+	NV04_PGRAPH_PATT_COLORRAM+0x3C,
+	NV04_PGRAPH_PATT_COLORRAM+0x3D,
+	NV04_PGRAPH_PATT_COLORRAM+0x3E,
+	NV04_PGRAPH_PATT_COLORRAM+0x3F,
+	NV04_PGRAPH_PATTERN,
+	0x0040080c,
+	NV04_PGRAPH_PATTERN_SHAPE,
+	0x00400600,
+	NV04_PGRAPH_ROP3,
+	NV04_PGRAPH_CHROMA,
+	NV04_PGRAPH_BETA_AND,
+	NV04_PGRAPH_BETA_PREMULT,
+	NV04_PGRAPH_CONTROL0,
+	NV04_PGRAPH_CONTROL1,
+	NV04_PGRAPH_CONTROL2,
+	NV04_PGRAPH_BLEND,
+	NV04_PGRAPH_STORED_FMT,
+	NV04_PGRAPH_SOURCE_COLOR,
+	0x00400560,
+	0x00400568,
+	0x00400564,
+	0x0040056c,
+	0x00400400,
+	0x00400480,
+	0x00400404,
+	0x00400484,
+	0x00400408,
+	0x00400488,
+	0x0040040c,
+	0x0040048c,
+	0x00400410,
+	0x00400490,
+	0x00400414,
+	0x00400494,
+	0x00400418,
+	0x00400498,
+	0x0040041c,
+	0x0040049c,
+	0x00400420,
+	0x004004a0,
+	0x00400424,
+	0x004004a4,
+	0x00400428,
+	0x004004a8,
+	0x0040042c,
+	0x004004ac,
+	0x00400430,
+	0x004004b0,
+	0x00400434,
+	0x004004b4,
+	0x00400438,
+	0x004004b8,
+	0x0040043c,
+	0x004004bc,
+	0x00400440,
+	0x004004c0,
+	0x00400444,
+	0x004004c4,
+	0x00400448,
+	0x004004c8,
+	0x0040044c,
+	0x004004cc,
+	0x00400450,
+	0x004004d0,
+	0x00400454,
+	0x004004d4,
+	0x00400458,
+	0x004004d8,
+	0x0040045c,
+	0x004004dc,
+	0x00400460,
+	0x004004e0,
+	0x00400464,
+	0x004004e4,
+	0x00400468,
+	0x004004e8,
+	0x0040046c,
+	0x004004ec,
+	0x00400470,
+	0x004004f0,
+	0x00400474,
+	0x004004f4,
+	0x00400478,
+	0x004004f8,
+	0x0040047c,
+	0x004004fc,
+	0x0040053c,
+	0x00400544,
+	0x00400540,
+	0x00400548,
+	0x00400560,
+	0x00400568,
+	0x00400564,
+	0x0040056c,
+	0x00400534,
+	0x00400538,
+	0x00400514,
+	0x00400518,
+	0x0040051c,
+	0x00400520,
+	0x00400524,
+	0x00400528,
+	0x0040052c,
+	0x00400530,
+	0x00400d00,
+	0x00400d40,
+	0x00400d80,
+	0x00400d04,
+	0x00400d44,
+	0x00400d84,
+	0x00400d08,
+	0x00400d48,
+	0x00400d88,
+	0x00400d0c,
+	0x00400d4c,
+	0x00400d8c,
+	0x00400d10,
+	0x00400d50,
+	0x00400d90,
+	0x00400d14,
+	0x00400d54,
+	0x00400d94,
+	0x00400d18,
+	0x00400d58,
+	0x00400d98,
+	0x00400d1c,
+	0x00400d5c,
+	0x00400d9c,
+	0x00400d20,
+	0x00400d60,
+	0x00400da0,
+	0x00400d24,
+	0x00400d64,
+	0x00400da4,
+	0x00400d28,
+	0x00400d68,
+	0x00400da8,
+	0x00400d2c,
+	0x00400d6c,
+	0x00400dac,
+	0x00400d30,
+	0x00400d70,
+	0x00400db0,
+	0x00400d34,
+	0x00400d74,
+	0x00400db4,
+	0x00400d38,
+	0x00400d78,
+	0x00400db8,
+	0x00400d3c,
+	0x00400d7c,
+	0x00400dbc,
+	0x00400590,
+	0x00400594,
+	0x00400598,
+	0x0040059c,
+	0x004005a8,
+	0x004005ac,
+	0x004005b0,
+	0x004005b4,
+	0x004005c0,
+	0x004005c4,
+	0x004005c8,
+	0x004005cc,
+	0x004005d0,
+	0x004005d4,
+	0x004005d8,
+	0x004005dc,
+	0x004005e0,
+	NV04_PGRAPH_PASSTHRU_0,
+	NV04_PGRAPH_PASSTHRU_1,
+	NV04_PGRAPH_PASSTHRU_2,
+	NV04_PGRAPH_DVD_COLORFMT,
+	NV04_PGRAPH_SCALED_FORMAT,
+	NV04_PGRAPH_MISC24_0,
+	NV04_PGRAPH_MISC24_1,
+	NV04_PGRAPH_MISC24_2,
+	0x00400500,
+	0x00400504,
+	NV04_PGRAPH_VALID1,
+	NV04_PGRAPH_VALID2
+
+
+struct graph_state {
+	int nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
+};
+
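+/* Return the channel that currently owns PGRAPH, or NULL if no valid
+ * channel is loaded (the channel ID is taken from PGRAPH_CTX_USER).
+ */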
+struct nouveau_channel *
+nv04_graph_channel(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int chid = dev_priv->engine.fifo.channels;
+
+	if (nv_rd32(dev, NV04_PGRAPH_CTX_CONTROL) & 0x00010000)
+		chid = nv_rd32(dev, NV04_PGRAPH_CTX_USER) >> 24;
+
+	if (chid >= dev_priv->engine.fifo.channels)
+		return NULL;
+
+	return dev_priv->fifos[chid];
+}
+
+void
+nv04_graph_context_switch(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nouveau_channel *chan = NULL;
+	int chid;
+
+	pgraph->fifo_access(dev, false);
+	nouveau_wait_for_idle(dev);
+
+	/* If previous context is valid, we need to save it */
+	pgraph->unload_context(dev);
+
+	/* Load context for next channel */
+	chid = dev_priv->engine.fifo.channel_id(dev);
+	chan = dev_priv->fifos[chid];
+	if (chan)
+		nv04_graph_load_context(chan);
+
+	pgraph->fifo_access(dev, true);
+}
+
+int nv04_graph_create_context(struct nouveau_channel *chan)
+{
+	struct graph_state *pgraph_ctx;
+	NV_DEBUG(chan->dev, "nv04_graph_create_context %d\n", chan->id);
+
+	chan->pgraph_ctx = pgraph_ctx = kzalloc(sizeof(*pgraph_ctx),
+						GFP_KERNEL);
+	if (pgraph_ctx == NULL)
+		return -ENOMEM;
+
+	/* dev_priv->fifos[channel].pgraph_ctx_user = channel << 24; */
+	pgraph_ctx->nv04[0] = 0x0001ffff;
+	/* XXX: is this really needed? */
+#if 0
+	dev_priv->fifos[channel].pgraph_ctx[1] =
+					nv_rd32(dev, NV_PGRAPH_DEBUG_4);
+	dev_priv->fifos[channel].pgraph_ctx[2] =
+					nv_rd32(dev, 0x004006b0);
+#endif
+	return 0;
+}
+
+void nv04_graph_destroy_context(struct nouveau_channel *chan)
+{
+	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+
+	kfree(pgraph_ctx);
+	chan->pgraph_ctx = NULL;
+}
+
+int nv04_graph_load_context(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+	uint32_t tmp;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
+		nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]);
+
+	nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
+	nv_wr32(dev, NV04_PGRAPH_CTX_USER, chan->id << 24);
+	tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2);
+	nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff);
+	return 0;
+}
+
+int
+nv04_graph_unload_context(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nouveau_channel *chan = NULL;
+	struct graph_state *ctx;
+	uint32_t tmp;
+	int i;
+
+	chan = pgraph->channel(dev);
+	if (!chan)
+		return 0;
+	ctx = chan->pgraph_ctx;
+
+	for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
+		ctx->nv04[i] = nv_rd32(dev, nv04_graph_ctx_regs[i]);
+
+	nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
+	tmp  = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
+	tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
+	nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
+	return 0;
+}
+
+int nv04_graph_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t tmp;
+
+	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
+			~NV_PMC_ENABLE_PGRAPH);
+	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
+			 NV_PMC_ENABLE_PGRAPH);
+
+	/* Enable PGRAPH interrupts */
+	nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
+	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+	nv_wr32(dev, NV04_PGRAPH_VALID1, 0);
+	nv_wr32(dev, NV04_PGRAPH_VALID2, 0);
+	/*nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x000001FF);
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x1231c000);
+	/* 0x1231c000 in the blob, 0x001 in haiku */
+	/* V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100); */
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x72111100);
+	/* 0x72111100 in the blob, 0x01 in haiku */
+	/* nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f870); */
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
+	/* haiku uses the same value */
+
+	/* nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xfad4ff31); */
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
+	/* haiku and the blob use 10d4 */
+
+	nv_wr32(dev, NV04_PGRAPH_STATE        , 0xFFFFFFFF);
+	nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL  , 0x10000100);
+	tmp  = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
+	tmp |= dev_priv->engine.fifo.channels << 24;
+	nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
+
+	/* These don't belong here, they're part of a per-channel context */
+	nv_wr32(dev, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
+	nv_wr32(dev, NV04_PGRAPH_BETA_AND     , 0xFFFFFFFF);
+
+	return 0;
+}
+
+void nv04_graph_takedown(struct drm_device *dev)
+{
+}
+
+void
+nv04_graph_fifo_access(struct drm_device *dev, bool enabled)
+{
+	if (enabled)
+		nv_wr32(dev, NV04_PGRAPH_FIFO,
+					nv_rd32(dev, NV04_PGRAPH_FIFO) | 1);
+	else
+		nv_wr32(dev, NV04_PGRAPH_FIFO,
+					nv_rd32(dev, NV04_PGRAPH_FIFO) & ~1);
+}
+
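+/* Software method: record the fence sequence number written by the
+ * channel and kick the fence handler.
+ */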
+static int
+nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
+			int mthd, uint32_t data)
+{
+	chan->fence.last_sequence_irq = data;
+	nouveau_fence_handler(chan->dev, chan->id);
+	return 0;
+}
+
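+/* Software method: patch the operation field (bits 15:17) into the
+ * object's instance memory and into the PGRAPH context caches for the
+ * subchannel that trapped.
+ */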
+static int
+nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
+			      int mthd, uint32_t data)
+{
+	struct drm_device *dev = chan->dev;
+	uint32_t instance = nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff;
+	int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
+	uint32_t tmp;
+
+	tmp  = nv_ri32(dev, instance);
+	tmp &= ~0x00038000;
+	tmp |= ((data & 7) << 15);
+
+	nv_wi32(dev, instance, tmp);
+	nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
+	nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + subc, tmp);
+	return 0;
+}
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_m2mf[] = {
+	{ 0x0150, nv04_graph_mthd_set_ref },
+	{}
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = {
+	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{}
+};
+
+struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
+	{ 0x0039, false, nv04_graph_mthds_m2mf },
+	{ 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */
+	{ 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */
+	{ 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */
+	{ 0x0077, false, nv04_graph_mthds_set_operation }, /* sifm */
+	{ 0x0030, false, NULL }, /* null */
+	{ 0x0042, false, NULL }, /* surf2d */
+	{ 0x0043, false, NULL }, /* rop */
+	{ 0x0012, false, NULL }, /* beta1 */
+	{ 0x0072, false, NULL }, /* beta4 */
+	{ 0x0019, false, NULL }, /* cliprect */
+	{ 0x0044, false, NULL }, /* pattern */
+	{ 0x0052, false, NULL }, /* swzsurf */
+	{ 0x0053, false, NULL }, /* surf3d */
+	{ 0x0054, false, NULL }, /* tex_tri */
+	{ 0x0055, false, NULL }, /* multitex_tri */
+	{}
+};
+
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
new file mode 100644
index 0000000..a20c206
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -0,0 +1,208 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+/* Returns the size of a FIFO context (RAMFC) entry, in bytes */
+static int
+nouveau_fifo_ctx_size(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->chipset >= 0x40)
+		return 128;
+	else if (dev_priv->chipset >= 0x17)
+		return 64;
+
+	return 32;
+}
+
+static void
+nv04_instmem_determine_amount(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int i;
+
+	/* Figure out how much instance memory we need */
+	if (dev_priv->card_type >= NV_40) {
+		/* We'll want more instance memory than this on some NV4x cards.
+		 * There's a 16MB aperture to play with that maps onto the end
+		 * of vram.  For now, only reserve a small piece until we know
+		 * more about what each chipset requires.
+		 */
+		switch (dev_priv->chipset & 0xf0) {
+		case 0x40:
+		case 0x47:
+		case 0x49:
+		case 0x4b:
+			dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024);
+			break;
+		default:
+			dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
+			break;
+		}
+	} else {
+		/* XXX: what *are* the limits on <NV40 cards? */
+		dev_priv->ramin_rsvd_vram = (512 * 1024);
+	}
+	NV_DEBUG(dev, "RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram >> 10);
+
+	/* Clear all of it, except the BIOS image that's in the first 64KiB */
+	dev_priv->engine.instmem.prepare_access(dev, true);
+	for (i = 64 * 1024; i < dev_priv->ramin_rsvd_vram; i += 4)
+		nv_wi32(dev, i, 0x00000000);
+	dev_priv->engine.instmem.finish_access(dev);
+}
+
+static void
+nv04_instmem_configure_fixed_tables(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_engine *engine = &dev_priv->engine;
+
+	/* FIFO hash table (RAMHT)
+	 *   use 4k hash table at RAMIN+0x10000
+	 *   TODO: extend the hash table
+	 */
+	dev_priv->ramht_offset = 0x10000;
+	dev_priv->ramht_bits   = 9;
+	dev_priv->ramht_size   = (1 << dev_priv->ramht_bits); /* nr entries */
+	dev_priv->ramht_size  *= 8; /* 2 32-bit values per entry in RAMHT */
+	NV_DEBUG(dev, "RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset,
+						      dev_priv->ramht_size);
+
+	/* FIFO runout table (RAMRO) - 512 bytes at 0x11200 */
+	dev_priv->ramro_offset = 0x11200;
+	dev_priv->ramro_size   = 512;
+	NV_DEBUG(dev, "RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset,
+						      dev_priv->ramro_size);
+
+	/* FIFO context table (RAMFC)
+	 *   NV40  : Not sure exactly how to position RAMFC on some cards,
+	 *           0x30002 seems to position it at RAMIN+0x20000 on these
+	 *           cards.  RAMFC is 4KiB (32 fifos, 128-byte entries).
+	 *   Others: Position RAMFC at RAMIN+0x11400.
+	 */
+	dev_priv->ramfc_size = engine->fifo.channels *
+						nouveau_fifo_ctx_size(dev);
+	switch (dev_priv->card_type) {
+	case NV_40:
+		dev_priv->ramfc_offset = 0x20000;
+		break;
+	case NV_30:
+	case NV_20:
+	case NV_10:
+	case NV_04:
+	default:
+		dev_priv->ramfc_offset = 0x11400;
+		break;
+	}
+	NV_DEBUG(dev, "RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset,
+						      dev_priv->ramfc_size);
+}
+
+int nv04_instmem_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t offset;
+	int ret = 0;
+
+	nv04_instmem_determine_amount(dev);
+	nv04_instmem_configure_fixed_tables(dev);
+
+	/* Create a heap to manage RAMIN allocations; we don't allocate
+	 * the space that was reserved for RAMHT/FC/RO.
+	 */
+	offset = dev_priv->ramfc_offset + dev_priv->ramfc_size;
+
+	/* It appears RAMRO (or something?) is controlled by 0x2220/0x2230
+	 * on certain NV4x chipsets as well as RAMFC.  When 0x2230 == 0
+	 * ("new style" control) the upper 16-bits of 0x2220 points at this
+	 * other mysterious table that's clobbering important things.
+	 *
+	 * We're now pointing this at RAMIN+0x30000 to avoid RAMFC getting
+	 * smashed to pieces on us, so reserve 0x30000-0x40000 too.
+	 */
+	if (dev_priv->card_type >= NV_40) {
+		if (offset < 0x40000)
+			offset = 0x40000;
+	}
+
+	ret = nouveau_mem_init_heap(&dev_priv->ramin_heap,
+				    offset, dev_priv->ramin_rsvd_vram - offset);
+	if (ret) {
+		dev_priv->ramin_heap = NULL;
+		NV_ERROR(dev, "Failed to init RAMIN heap\n");
+	}
+
+	return ret;
+}
+
+void
+nv04_instmem_takedown(struct drm_device *dev)
+{
+}
+
+int
+nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz)
+{
+	if (gpuobj->im_backing)
+		return -EINVAL;
+
+	return 0;
+}
+
+void
+nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (gpuobj && gpuobj->im_backing) {
+		if (gpuobj->im_bound)
+			dev_priv->engine.instmem.unbind(dev, gpuobj);
+		gpuobj->im_backing = NULL;
+	}
+}
+
+int
+nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+	if (!gpuobj->im_pramin || gpuobj->im_bound)
+		return -EINVAL;
+
+	gpuobj->im_bound = 1;
+	return 0;
+}
+
+int
+nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+	if (gpuobj->im_bound == 0)
+		return -EINVAL;
+
+	gpuobj->im_bound = 0;
+	return 0;
+}
+
+void
+nv04_instmem_prepare_access(struct drm_device *dev, bool write)
+{
+}
+
+void
+nv04_instmem_finish_access(struct drm_device *dev)
+{
+}
+
+int
+nv04_instmem_suspend(struct drm_device *dev)
+{
+	return 0;
+}
+
+void
+nv04_instmem_resume(struct drm_device *dev)
+{
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv04_mc.c b/drivers/gpu/drm/nouveau/nv04_mc.c
new file mode 100644
index 0000000..617ed1e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_mc.c
@@ -0,0 +1,20 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv04_mc_init(struct drm_device *dev)
+{
+	/* Power up everything, resetting each individual unit will
+	 * be done later if needed.
+	 */
+
+	nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
+	return 0;
+}
+
+void
+nv04_mc_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_timer.c b/drivers/gpu/drm/nouveau/nv04_timer.c
new file mode 100644
index 0000000..1d09ddd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_timer.c
@@ -0,0 +1,51 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv04_timer_init(struct drm_device *dev)
+{
+	nv_wr32(dev, NV04_PTIMER_INTR_EN_0, 0x00000000);
+	nv_wr32(dev, NV04_PTIMER_INTR_0, 0xFFFFFFFF);
+
+	/* Just use the pre-existing values when possible for now; these regs
+	 * are not written in nv (driver writer missed a /4 on the address), and
+	 * writing 8 and 3 to the correct regs breaks the timings on the LVDS
+	 * hardware sequencing microcode.
+	 * A correct solution (involving calculations with the GPU PLL) can
+	 * be done when kernel modesetting lands.
+	 */
+	if (!nv_rd32(dev, NV04_PTIMER_NUMERATOR) ||
+				!nv_rd32(dev, NV04_PTIMER_DENOMINATOR)) {
+		nv_wr32(dev, NV04_PTIMER_NUMERATOR, 0x00000008);
+		nv_wr32(dev, NV04_PTIMER_DENOMINATOR, 0x00000003);
+	}
+
+	return 0;
+}
+
+uint64_t
+nv04_timer_read(struct drm_device *dev)
+{
+	uint32_t low;
+	/* From kmmio dumps on nv28 this looks like how the blob does this.
+	 * It reads the high dword twice, before and after.
+	 * The only explanation seems to be that the 64-bit timer counter
+	 * advances between high and low dword reads and may corrupt the
+	 * result. Not confirmed.
+	 */
+	uint32_t high2 = nv_rd32(dev, NV04_PTIMER_TIME_1);
+	uint32_t high1;
+	do {
+		high1 = high2;
+		low = nv_rd32(dev, NV04_PTIMER_TIME_0);
+		high2 = nv_rd32(dev, NV04_PTIMER_TIME_1);
+	} while (high1 != high2);
+	return (((uint64_t)high2) << 32) | (uint64_t)low;
+}
+
+void
+nv04_timer_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
new file mode 100644
index 0000000..9c63099
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_tv.c
@@ -0,0 +1,305 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_crtc.h"
+#include "nouveau_hw.h"
+#include "drm_crtc_helper.h"
+
+#include "i2c/ch7006.h"
+
+static struct {
+	struct i2c_board_info board_info;
+	struct drm_encoder_funcs funcs;
+	struct drm_encoder_helper_funcs hfuncs;
+	void *params;
+} nv04_tv_encoder_info[] = {
+	{
+		.board_info = { I2C_BOARD_INFO("ch7006", 0x75) },
+		.params = &(struct ch7006_encoder_params) {
+			CH7006_FORMAT_RGB24m12I, CH7006_CLOCK_MASTER,
+			0, 0, 0,
+			CH7006_SYNC_SLAVE, CH7006_SYNC_SEPARATED,
+			CH7006_POUT_3_3V, CH7006_ACTIVE_HSYNC
+		},
+	},
+};
+
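+/* A zero-length write succeeds only if a device acknowledges the
+ * address, which makes it a cheap presence check.
+ */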
+static bool probe_i2c_addr(struct i2c_adapter *adapter, int addr)
+{
+	struct i2c_msg msg = {
+		.addr = addr,
+		.len = 0,
+	};
+
+	return i2c_transfer(adapter, &msg, 1) == 1;
+}
+
+int nv04_tv_identify(struct drm_device *dev, int i2c_index)
+{
+	struct nouveau_i2c_chan *i2c;
+	bool was_locked;
+	int i;
+
+	NV_TRACE(dev, "Probing TV encoders on I2C bus: %d\n", i2c_index);
+
+	i2c = nouveau_i2c_find(dev, i2c_index);
+	if (!i2c)
+		return -ENODEV;
+
+	was_locked = NVLockVgaCrtcs(dev, false);
+
+	for (i = 0; i < ARRAY_SIZE(nv04_tv_encoder_info); i++) {
+		if (probe_i2c_addr(&i2c->adapter,
+				   nv04_tv_encoder_info[i].board_info.addr)) {
+			break;
+		}
+	}
+
+	if (i < ARRAY_SIZE(nv04_tv_encoder_info)) {
+		NV_TRACE(dev, "Detected TV encoder: %s\n",
+			 nv04_tv_encoder_info[i].board_info.type);
+
+	} else {
+		NV_TRACE(dev, "No TV encoders found.\n");
+		i = -ENODEV;
+	}
+
+	NVLockVgaCrtcs(dev, was_locked);
+	return i;
+}
+
+#define PLLSEL_TV_CRTC1_MASK				\
+	(NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1		\
+	 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1)
+#define PLLSEL_TV_CRTC2_MASK				\
+	(NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2		\
+	 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2)
+
+static void nv04_tv_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv04_mode_state *state = &dev_priv->mode_reg;
+	uint8_t crtc1A;
+
+	NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
+		mode, nv_encoder->dcb->index);
+
+	state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK);
+
+	if (mode == DRM_MODE_DPMS_ON) {
+		int head = nouveau_crtc(encoder->crtc)->index;
+		crtc1A = NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX);
+
+		state->pllsel |= head ? PLLSEL_TV_CRTC2_MASK :
+					PLLSEL_TV_CRTC1_MASK;
+
+		/* Inhibit hsync */
+		crtc1A |= 0x80;
+
+		NVWriteVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX, crtc1A);
+	}
+
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
+
+	to_encoder_slave(encoder)->slave_funcs->dpms(encoder, mode);
+}
+
+static void nv04_tv_bind(struct drm_device *dev, int head, bool bind)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv04_crtc_reg *state = &dev_priv->mode_reg.crtc_reg[head];
+
+	state->tv_setup = 0;
+
+	if (bind) {
+		state->CRTC[NV_CIO_CRE_LCD__INDEX] = 0;
+		state->CRTC[NV_CIO_CRE_49] |= 0x10;
+	} else {
+		state->CRTC[NV_CIO_CRE_49] &= ~0x10;
+	}
+
+	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_LCD__INDEX,
+		       state->CRTC[NV_CIO_CRE_LCD__INDEX]);
+	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_49,
+		       state->CRTC[NV_CIO_CRE_49]);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP,
+		      state->tv_setup);
+}
+
+static void nv04_tv_prepare(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	int head = nouveau_crtc(encoder->crtc)->index;
+	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+
+	helper->dpms(encoder, DRM_MODE_DPMS_OFF);
+
+	nv04_dfp_disable(dev, head);
+
+	if (nv_two_heads(dev))
+		nv04_tv_bind(dev, head ^ 1, false);
+
+	nv04_tv_bind(dev, head, true);
+}
+
+static void nv04_tv_mode_set(struct drm_encoder *encoder,
+			     struct drm_display_mode *mode,
+			     struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+
+	regp->tv_htotal = adjusted_mode->htotal;
+	regp->tv_vtotal = adjusted_mode->vtotal;
+
+	/* These delay the TV signals with respect to the VGA port;
+	 * they might be useful if we ever allow a CRTC to drive
+	 * multiple outputs.
+	 */
+	regp->tv_hskew = 1;
+	regp->tv_hsync_delay = 1;
+	regp->tv_hsync_delay2 = 64;
+	regp->tv_vskew = 1;
+	regp->tv_vsync_delay = 1;
+
+	to_encoder_slave(encoder)->slave_funcs->mode_set(encoder, mode, adjusted_mode);
+}
+
+static void nv04_tv_commit(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+
+	helper->dpms(encoder, DRM_MODE_DPMS_ON);
+
+	NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
+		      drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index,
+		      '@' + ffs(nv_encoder->dcb->or));
+}
+
+static void nv04_tv_destroy(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+	to_encoder_slave(encoder)->slave_funcs->destroy(encoder);
+
+	drm_encoder_cleanup(encoder);
+
+	kfree(nv_encoder);
+}
+
+int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry)
+{
+	struct nouveau_encoder *nv_encoder;
+	struct drm_encoder *encoder;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct i2c_adapter *adap;
+	struct drm_encoder_funcs *funcs = NULL;
+	struct drm_encoder_helper_funcs *hfuncs = NULL;
+	struct drm_encoder_slave_funcs *sfuncs = NULL;
+	int i2c_index = entry->i2c_index;
+	int type, ret;
+	bool was_locked;
+
+	/* Ensure that we can talk to this encoder */
+	type = nv04_tv_identify(dev, i2c_index);
+	if (type < 0)
+		return type;
+
+	/* Allocate the necessary memory */
+	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+	if (!nv_encoder)
+		return -ENOMEM;
+
+	/* Initialize the common members */
+	encoder = to_drm_encoder(nv_encoder);
+
+	funcs = &nv04_tv_encoder_info[type].funcs;
+	hfuncs = &nv04_tv_encoder_info[type].hfuncs;
+
+	drm_encoder_init(dev, encoder, funcs, DRM_MODE_ENCODER_TVDAC);
+	drm_encoder_helper_add(encoder, hfuncs);
+
+	encoder->possible_crtcs = entry->heads;
+	encoder->possible_clones = 0;
+
+	nv_encoder->dcb = entry;
+	nv_encoder->or = ffs(entry->or) - 1;
+
+	/* Run the slave-specific initialization */
+	adap = &dev_priv->vbios->dcb->i2c[i2c_index].chan->adapter;
+
+	was_locked = NVLockVgaCrtcs(dev, false);
+
+	ret = drm_i2c_encoder_init(encoder->dev, to_encoder_slave(encoder), adap,
+				   &nv04_tv_encoder_info[type].board_info);
+
+	NVLockVgaCrtcs(dev, was_locked);
+
+	if (ret < 0)
+		goto fail;
+
+	/* Fill the function pointers */
+	sfuncs = to_encoder_slave(encoder)->slave_funcs;
+
+	*funcs = (struct drm_encoder_funcs) {
+		.destroy = nv04_tv_destroy,
+	};
+
+	*hfuncs = (struct drm_encoder_helper_funcs) {
+		.dpms = nv04_tv_dpms,
+		.save = sfuncs->save,
+		.restore = sfuncs->restore,
+		.mode_fixup = sfuncs->mode_fixup,
+		.prepare = nv04_tv_prepare,
+		.commit = nv04_tv_commit,
+		.mode_set = nv04_tv_mode_set,
+		.detect = sfuncs->detect,
+	};
+
+	/* Set the slave encoder configuration */
+	sfuncs->set_config(encoder, nv04_tv_encoder_info[type].params);
+
+	return 0;
+
+fail:
+	drm_encoder_cleanup(encoder);
+
+	kfree(nv_encoder);
+	return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
new file mode 100644
index 0000000..79e2d10
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_fb.c
@@ -0,0 +1,24 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv10_fb_init(struct drm_device *dev)
+{
+	uint32_t fb_bar_size;
+	int i;
+
+	fb_bar_size = drm_get_resource_len(dev, 0) - 1;
+	for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
+		nv_wr32(dev, NV10_PFB_TILE(i), 0);
+		nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size);
+	}
+
+	return 0;
+}
+
+void
+nv10_fb_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
new file mode 100644
index 0000000..7aeabf2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+#define NV10_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV10_RAMFC__SIZE))
+#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32)
+
+int
+nv10_fifo_channel_id(struct drm_device *dev)
+{
+	return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
+			NV10_PFIFO_CACHE1_PUSH1_CHID_MASK;
+}
+
+int
+nv10_fifo_create_context(struct nouveau_channel *chan)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct drm_device *dev = chan->dev;
+	uint32_t fc = NV10_RAMFC(chan->id);
+	int ret;
+
+	ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0,
+				      NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
+				      NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc);
+	if (ret)
+		return ret;
+
+	/* Fill the entries that appear filled in dumps of the nvidia driver
+	 * just after a channel is put into DMA mode.
+	 */
+	dev_priv->engine.instmem.prepare_access(dev, true);
+	nv_wi32(dev, fc +  0, chan->pushbuf_base);
+	nv_wi32(dev, fc +  4, chan->pushbuf_base);
+	nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
+	nv_wi32(dev, fc + 20, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+			      NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+			      NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
+#ifdef __BIG_ENDIAN
+			      NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+			      0);
+	dev_priv->engine.instmem.finish_access(dev);
+
+	/* enable the fifo dma operation */
+	nv_wr32(dev, NV04_PFIFO_MODE,
+		nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+	return 0;
+}
+
+void
+nv10_fifo_destroy_context(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+
+	nv_wr32(dev, NV04_PFIFO_MODE,
+			nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
+
+	nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+}
+
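+/* RAMFC layout used below: +0 DMA_PUT, +4 DMA_GET, +8 REF_CNT,
+ * +12 DMA_INSTANCE/DCOUNT, +16 DMA_STATE, +20 DMA_FETCH, +24 ENGINE,
+ * +28 PULL1; NV17+ adds ACQUIRE_VALUE/TIMESTAMP/TIMEOUT, SEMAPHORE and
+ * DMA_SUBROUTINE at +32..+48.
+ */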
+static void
+nv10_fifo_do_load_context(struct drm_device *dev, int chid)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t fc = NV10_RAMFC(chid), tmp;
+
+	dev_priv->engine.instmem.prepare_access(dev, false);
+
+	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
+	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
+	nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
+
+	tmp = nv_ri32(dev, fc + 12);
+	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
+	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
+
+	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 16));
+	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 20));
+	nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 24));
+	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 28));
+
+	if (dev_priv->chipset < 0x17)
+		goto out;
+
+	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 32));
+	tmp = nv_ri32(dev, fc + 36);
+	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
+	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 40));
+	nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 44));
+	nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 48));
+
+out:
+	dev_priv->engine.instmem.finish_access(dev);
+
+	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
+	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
+}
+
+int
+nv10_fifo_load_context(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	uint32_t tmp;
+
+	nv10_fifo_do_load_context(dev, chan->id);
+
+	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
+		     NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
+	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
+
+	/* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
+	tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
+	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
+
+	return 0;
+}
+
+int
+nv10_fifo_unload_context(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	uint32_t fc, tmp;
+	int chid;
+
+	chid = pfifo->channel_id(dev);
+	if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
+		return 0;
+	fc = NV10_RAMFC(chid);
+
+	dev_priv->engine.instmem.prepare_access(dev, true);
+
+	nv_wi32(dev, fc +  0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
+	nv_wi32(dev, fc +  4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
+	nv_wi32(dev, fc +  8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
+	tmp  = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE) & 0xFFFF;
+	tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16);
+	nv_wi32(dev, fc + 12, tmp);
+	nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
+	nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
+	nv_wi32(dev, fc + 24, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
+	nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
+
+	if (dev_priv->chipset < 0x17)
+		goto out;
+
+	nv_wi32(dev, fc + 32, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
+	tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
+	nv_wi32(dev, fc + 36, tmp);
+	nv_wi32(dev, fc + 40, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
+	nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
+	nv_wi32(dev, fc + 48, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
+
+out:
+	dev_priv->engine.instmem.finish_access(dev);
+
+	nv10_fifo_do_load_context(dev, pfifo->channels - 1);
+	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
+	return 0;
+}
+
+static void
+nv10_fifo_init_reset(struct drm_device *dev)
+{
+	nv_wr32(dev, NV03_PMC_ENABLE,
+		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
+	nv_wr32(dev, NV03_PMC_ENABLE,
+		nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PFIFO);
+
+	nv_wr32(dev, 0x003224, 0x000f0078);
+	nv_wr32(dev, 0x002044, 0x0101ffff);
+	nv_wr32(dev, 0x002040, 0x000000ff);
+	nv_wr32(dev, 0x002500, 0x00000000);
+	nv_wr32(dev, 0x003000, 0x00000000);
+	nv_wr32(dev, 0x003050, 0x00000000);
+
+	nv_wr32(dev, 0x003258, 0x00000000);
+	nv_wr32(dev, 0x003210, 0x00000000);
+	nv_wr32(dev, 0x003270, 0x00000000);
+}
+
+static void
+nv10_fifo_init_ramxx(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+				       ((dev_priv->ramht_bits - 9) << 16) |
+				       (dev_priv->ramht_offset >> 8));
+	nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset >> 8);
+
+	if (dev_priv->chipset < 0x17) {
+		nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8);
+	} else {
+		nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc_offset >> 8) |
+					       (1 << 16) /* 64-byte entries */);
+		/* XXX the nvidia blob sets bits 18, 21 and 23 for nv20 & nv30 */
+	}
+}
+
+static void
+nv10_fifo_init_intr(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x002100, 0xffffffff);
+	nv_wr32(dev, 0x002140, 0xffffffff);
+}
+
+int
+nv10_fifo_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	int i;
+
+	nv10_fifo_init_reset(dev);
+	nv10_fifo_init_ramxx(dev);
+
+	nv10_fifo_do_load_context(dev, pfifo->channels - 1);
+	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
+
+	nv10_fifo_init_intr(dev);
+	pfifo->enable(dev);
+	pfifo->reassign(dev, true);
+
+	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+		if (dev_priv->fifos[i]) {
+			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
+			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
new file mode 100644
index 0000000..6bf6804
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -0,0 +1,892 @@
+/*
+ * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drm.h"
+#include "nouveau_drv.h"
+
+#define NV10_FIFO_NUMBER 32
+
+struct pipe_state {
+	uint32_t pipe_0x0000[0x040/4];
+	uint32_t pipe_0x0040[0x010/4];
+	uint32_t pipe_0x0200[0x0c0/4];
+	uint32_t pipe_0x4400[0x080/4];
+	uint32_t pipe_0x6400[0x3b0/4];
+	uint32_t pipe_0x6800[0x2f0/4];
+	uint32_t pipe_0x6c00[0x030/4];
+	uint32_t pipe_0x7000[0x130/4];
+	uint32_t pipe_0x7400[0x0c0/4];
+	uint32_t pipe_0x7800[0x0c0/4];
+};
+
+static int nv10_graph_ctx_regs[] = {
+	NV10_PGRAPH_CTX_SWITCH1,
+	NV10_PGRAPH_CTX_SWITCH2,
+	NV10_PGRAPH_CTX_SWITCH3,
+	NV10_PGRAPH_CTX_SWITCH4,
+	NV10_PGRAPH_CTX_SWITCH5,
+	NV10_PGRAPH_CTX_CACHE1,	/* 8 values from 0x400160 to 0x40017c */
+	NV10_PGRAPH_CTX_CACHE2,	/* 8 values from 0x400180 to 0x40019c */
+	NV10_PGRAPH_CTX_CACHE3,	/* 8 values from 0x4001a0 to 0x4001bc */
+	NV10_PGRAPH_CTX_CACHE4,	/* 8 values from 0x4001c0 to 0x4001dc */
+	NV10_PGRAPH_CTX_CACHE5,	/* 8 values from 0x4001e0 to 0x4001fc */
+	0x00400164,
+	0x00400184,
+	0x004001a4,
+	0x004001c4,
+	0x004001e4,
+	0x00400168,
+	0x00400188,
+	0x004001a8,
+	0x004001c8,
+	0x004001e8,
+	0x0040016c,
+	0x0040018c,
+	0x004001ac,
+	0x004001cc,
+	0x004001ec,
+	0x00400170,
+	0x00400190,
+	0x004001b0,
+	0x004001d0,
+	0x004001f0,
+	0x00400174,
+	0x00400194,
+	0x004001b4,
+	0x004001d4,
+	0x004001f4,
+	0x00400178,
+	0x00400198,
+	0x004001b8,
+	0x004001d8,
+	0x004001f8,
+	0x0040017c,
+	0x0040019c,
+	0x004001bc,
+	0x004001dc,
+	0x004001fc,
+	NV10_PGRAPH_CTX_USER,
+	NV04_PGRAPH_DMA_START_0,
+	NV04_PGRAPH_DMA_START_1,
+	NV04_PGRAPH_DMA_LENGTH,
+	NV04_PGRAPH_DMA_MISC,
+	NV10_PGRAPH_DMA_PITCH,
+	NV04_PGRAPH_BOFFSET0,
+	NV04_PGRAPH_BBASE0,
+	NV04_PGRAPH_BLIMIT0,
+	NV04_PGRAPH_BOFFSET1,
+	NV04_PGRAPH_BBASE1,
+	NV04_PGRAPH_BLIMIT1,
+	NV04_PGRAPH_BOFFSET2,
+	NV04_PGRAPH_BBASE2,
+	NV04_PGRAPH_BLIMIT2,
+	NV04_PGRAPH_BOFFSET3,
+	NV04_PGRAPH_BBASE3,
+	NV04_PGRAPH_BLIMIT3,
+	NV04_PGRAPH_BOFFSET4,
+	NV04_PGRAPH_BBASE4,
+	NV04_PGRAPH_BLIMIT4,
+	NV04_PGRAPH_BOFFSET5,
+	NV04_PGRAPH_BBASE5,
+	NV04_PGRAPH_BLIMIT5,
+	NV04_PGRAPH_BPITCH0,
+	NV04_PGRAPH_BPITCH1,
+	NV04_PGRAPH_BPITCH2,
+	NV04_PGRAPH_BPITCH3,
+	NV04_PGRAPH_BPITCH4,
+	NV10_PGRAPH_SURFACE,
+	NV10_PGRAPH_STATE,
+	NV04_PGRAPH_BSWIZZLE2,
+	NV04_PGRAPH_BSWIZZLE5,
+	NV04_PGRAPH_BPIXEL,
+	NV10_PGRAPH_NOTIFY,
+	NV04_PGRAPH_PATT_COLOR0,
+	NV04_PGRAPH_PATT_COLOR1,
+	NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */
+	0x00400904,
+	0x00400908,
+	0x0040090c,
+	0x00400910,
+	0x00400914,
+	0x00400918,
+	0x0040091c,
+	0x00400920,
+	0x00400924,
+	0x00400928,
+	0x0040092c,
+	0x00400930,
+	0x00400934,
+	0x00400938,
+	0x0040093c,
+	0x00400940,
+	0x00400944,
+	0x00400948,
+	0x0040094c,
+	0x00400950,
+	0x00400954,
+	0x00400958,
+	0x0040095c,
+	0x00400960,
+	0x00400964,
+	0x00400968,
+	0x0040096c,
+	0x00400970,
+	0x00400974,
+	0x00400978,
+	0x0040097c,
+	0x00400980,
+	0x00400984,
+	0x00400988,
+	0x0040098c,
+	0x00400990,
+	0x00400994,
+	0x00400998,
+	0x0040099c,
+	0x004009a0,
+	0x004009a4,
+	0x004009a8,
+	0x004009ac,
+	0x004009b0,
+	0x004009b4,
+	0x004009b8,
+	0x004009bc,
+	0x004009c0,
+	0x004009c4,
+	0x004009c8,
+	0x004009cc,
+	0x004009d0,
+	0x004009d4,
+	0x004009d8,
+	0x004009dc,
+	0x004009e0,
+	0x004009e4,
+	0x004009e8,
+	0x004009ec,
+	0x004009f0,
+	0x004009f4,
+	0x004009f8,
+	0x004009fc,
+	NV04_PGRAPH_PATTERN,	/* 2 values from 0x400808 to 0x40080c */
+	0x0040080c,
+	NV04_PGRAPH_PATTERN_SHAPE,
+	NV03_PGRAPH_MONO_COLOR0,
+	NV04_PGRAPH_ROP3,
+	NV04_PGRAPH_CHROMA,
+	NV04_PGRAPH_BETA_AND,
+	NV04_PGRAPH_BETA_PREMULT,
+	0x00400e70,
+	0x00400e74,
+	0x00400e78,
+	0x00400e7c,
+	0x00400e80,
+	0x00400e84,
+	0x00400e88,
+	0x00400e8c,
+	0x00400ea0,
+	0x00400ea4,
+	0x00400ea8,
+	0x00400e90,
+	0x00400e94,
+	0x00400e98,
+	0x00400e9c,
+	NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00-0x400f1c */
+	NV10_PGRAPH_WINDOWCLIP_VERTICAL,   /* 8 values from 0x400f20-0x400f3c */
+	0x00400f04,
+	0x00400f24,
+	0x00400f08,
+	0x00400f28,
+	0x00400f0c,
+	0x00400f2c,
+	0x00400f10,
+	0x00400f30,
+	0x00400f14,
+	0x00400f34,
+	0x00400f18,
+	0x00400f38,
+	0x00400f1c,
+	0x00400f3c,
+	NV10_PGRAPH_XFMODE0,
+	NV10_PGRAPH_XFMODE1,
+	NV10_PGRAPH_GLOBALSTATE0,
+	NV10_PGRAPH_GLOBALSTATE1,
+	NV04_PGRAPH_STORED_FMT,
+	NV04_PGRAPH_SOURCE_COLOR,
+	NV03_PGRAPH_ABS_X_RAM,	/* 32 values from 0x400400 to 0x40047c */
+	NV03_PGRAPH_ABS_Y_RAM,	/* 32 values from 0x400480 to 0x4004fc */
+	0x00400404,
+	0x00400484,
+	0x00400408,
+	0x00400488,
+	0x0040040c,
+	0x0040048c,
+	0x00400410,
+	0x00400490,
+	0x00400414,
+	0x00400494,
+	0x00400418,
+	0x00400498,
+	0x0040041c,
+	0x0040049c,
+	0x00400420,
+	0x004004a0,
+	0x00400424,
+	0x004004a4,
+	0x00400428,
+	0x004004a8,
+	0x0040042c,
+	0x004004ac,
+	0x00400430,
+	0x004004b0,
+	0x00400434,
+	0x004004b4,
+	0x00400438,
+	0x004004b8,
+	0x0040043c,
+	0x004004bc,
+	0x00400440,
+	0x004004c0,
+	0x00400444,
+	0x004004c4,
+	0x00400448,
+	0x004004c8,
+	0x0040044c,
+	0x004004cc,
+	0x00400450,
+	0x004004d0,
+	0x00400454,
+	0x004004d4,
+	0x00400458,
+	0x004004d8,
+	0x0040045c,
+	0x004004dc,
+	0x00400460,
+	0x004004e0,
+	0x00400464,
+	0x004004e4,
+	0x00400468,
+	0x004004e8,
+	0x0040046c,
+	0x004004ec,
+	0x00400470,
+	0x004004f0,
+	0x00400474,
+	0x004004f4,
+	0x00400478,
+	0x004004f8,
+	0x0040047c,
+	0x004004fc,
+	NV03_PGRAPH_ABS_UCLIP_XMIN,
+	NV03_PGRAPH_ABS_UCLIP_XMAX,
+	NV03_PGRAPH_ABS_UCLIP_YMIN,
+	NV03_PGRAPH_ABS_UCLIP_YMAX,
+	0x00400550,
+	0x00400558,
+	0x00400554,
+	0x0040055c,
+	NV03_PGRAPH_ABS_UCLIPA_XMIN,
+	NV03_PGRAPH_ABS_UCLIPA_XMAX,
+	NV03_PGRAPH_ABS_UCLIPA_YMIN,
+	NV03_PGRAPH_ABS_UCLIPA_YMAX,
+	NV03_PGRAPH_ABS_ICLIP_XMAX,
+	NV03_PGRAPH_ABS_ICLIP_YMAX,
+	NV03_PGRAPH_XY_LOGIC_MISC0,
+	NV03_PGRAPH_XY_LOGIC_MISC1,
+	NV03_PGRAPH_XY_LOGIC_MISC2,
+	NV03_PGRAPH_XY_LOGIC_MISC3,
+	NV03_PGRAPH_CLIPX_0,
+	NV03_PGRAPH_CLIPX_1,
+	NV03_PGRAPH_CLIPY_0,
+	NV03_PGRAPH_CLIPY_1,
+	NV10_PGRAPH_COMBINER0_IN_ALPHA,
+	NV10_PGRAPH_COMBINER1_IN_ALPHA,
+	NV10_PGRAPH_COMBINER0_IN_RGB,
+	NV10_PGRAPH_COMBINER1_IN_RGB,
+	NV10_PGRAPH_COMBINER_COLOR0,
+	NV10_PGRAPH_COMBINER_COLOR1,
+	NV10_PGRAPH_COMBINER0_OUT_ALPHA,
+	NV10_PGRAPH_COMBINER1_OUT_ALPHA,
+	NV10_PGRAPH_COMBINER0_OUT_RGB,
+	NV10_PGRAPH_COMBINER1_OUT_RGB,
+	NV10_PGRAPH_COMBINER_FINAL0,
+	NV10_PGRAPH_COMBINER_FINAL1,
+	0x00400e00,
+	0x00400e04,
+	0x00400e08,
+	0x00400e0c,
+	0x00400e10,
+	0x00400e14,
+	0x00400e18,
+	0x00400e1c,
+	0x00400e20,
+	0x00400e24,
+	0x00400e28,
+	0x00400e2c,
+	0x00400e30,
+	0x00400e34,
+	0x00400e38,
+	0x00400e3c,
+	NV04_PGRAPH_PASSTHRU_0,
+	NV04_PGRAPH_PASSTHRU_1,
+	NV04_PGRAPH_PASSTHRU_2,
+	NV10_PGRAPH_DIMX_TEXTURE,
+	NV10_PGRAPH_WDIMX_TEXTURE,
+	NV10_PGRAPH_DVD_COLORFMT,
+	NV10_PGRAPH_SCALED_FORMAT,
+	NV04_PGRAPH_MISC24_0,
+	NV04_PGRAPH_MISC24_1,
+	NV04_PGRAPH_MISC24_2,
+	NV03_PGRAPH_X_MISC,
+	NV03_PGRAPH_Y_MISC,
+	NV04_PGRAPH_VALID1,
+	NV04_PGRAPH_VALID2,
+};
+
+static int nv17_graph_ctx_regs[] = {
+	NV10_PGRAPH_DEBUG_4,
+	0x004006b0,
+	0x00400eac,
+	0x00400eb0,
+	0x00400eb4,
+	0x00400eb8,
+	0x00400ebc,
+	0x00400ec0,
+	0x00400ec4,
+	0x00400ec8,
+	0x00400ecc,
+	0x00400ed0,
+	0x00400ed4,
+	0x00400ed8,
+	0x00400edc,
+	0x00400ee0,
+	0x00400a00,
+	0x00400a04,
+};
+
+struct graph_state {
+	int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)];
+	int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)];
+	struct pipe_state pipe_state;
+};
+
+static void nv10_graph_save_pipe(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+	struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
+	int i;
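+	/* PIPE state is accessed indirectly: write a base address to
+	 * NV10_PGRAPH_PIPE_ADDRESS, then NV10_PGRAPH_PIPE_DATA steps
+	 * through the range on each read or write. */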
+#define PIPE_SAVE(addr) \
+	do { \
+		nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
+		for (i = 0; i < ARRAY_SIZE(fifo_pipe_state->pipe_##addr); i++) \
+			fifo_pipe_state->pipe_##addr[i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA); \
+	} while (0)
+
+	PIPE_SAVE(0x4400);
+	PIPE_SAVE(0x0200);
+	PIPE_SAVE(0x6400);
+	PIPE_SAVE(0x6800);
+	PIPE_SAVE(0x6c00);
+	PIPE_SAVE(0x7000);
+	PIPE_SAVE(0x7400);
+	PIPE_SAVE(0x7800);
+	PIPE_SAVE(0x0040);
+	PIPE_SAVE(0x0000);
+
+#undef PIPE_SAVE
+}
+
+static void nv10_graph_load_pipe(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+	struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
+	int i;
+	uint32_t xfmode0, xfmode1;
+#define PIPE_RESTORE(addr) \
+	do { \
+		nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
+		for (i = 0; i < ARRAY_SIZE(fifo_pipe_state->pipe_##addr); i++) \
+			nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, fifo_pipe_state->pipe_##addr[i]); \
+	} while (0)
+
+	nouveau_wait_for_idle(dev);
+	/* XXX check haiku comments */
+	xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
+	xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
+	nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
+	nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
+	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
+	for (i = 0; i < 4; i++)
+		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+	for (i = 0; i < 4; i++)
+		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
+	for (i = 0; i < 3; i++)
+		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+
+	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
+	for (i = 0; i < 3; i++)
+		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+	nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
+
+	PIPE_RESTORE(0x0200);
+	nouveau_wait_for_idle(dev);
+
+	/* restore XFMODE */
+	nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
+	nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
+	PIPE_RESTORE(0x6400);
+	PIPE_RESTORE(0x6800);
+	PIPE_RESTORE(0x6c00);
+	PIPE_RESTORE(0x7000);
+	PIPE_RESTORE(0x7400);
+	PIPE_RESTORE(0x7800);
+	PIPE_RESTORE(0x4400);
+	PIPE_RESTORE(0x0000);
+	PIPE_RESTORE(0x0040);
+	nouveau_wait_for_idle(dev);
+
+#undef PIPE_RESTORE
+}
+
+static void nv10_graph_create_pipe(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+	struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
+	uint32_t *fifo_pipe_state_addr;
+	int i;
+#define PIPE_INIT(addr) \
+	do { \
+		fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \
+	} while (0)
+#define PIPE_INIT_END(addr) \
+	do { \
+		uint32_t *__end_addr = fifo_pipe_state->pipe_##addr + \
+				ARRAY_SIZE(fifo_pipe_state->pipe_##addr); \
+		if (fifo_pipe_state_addr != __end_addr) \
+			NV_ERROR(dev, "incomplete pipe init for 0x%x :  %p/%p\n", \
+				addr, fifo_pipe_state_addr, __end_addr); \
+	} while (0)
+#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value
+
+	PIPE_INIT(0x0200);
+	for (i = 0; i < 48; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x0200);
+
+	PIPE_INIT(0x6400);
+	for (i = 0; i < 211; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	NV_WRITE_PIPE_INIT(0x40000000);
+	NV_WRITE_PIPE_INIT(0x40000000);
+	NV_WRITE_PIPE_INIT(0x40000000);
+	NV_WRITE_PIPE_INIT(0x40000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x3f000000);
+	NV_WRITE_PIPE_INIT(0x3f000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	PIPE_INIT_END(0x6400);
+
+	PIPE_INIT(0x6800);
+	for (i = 0; i < 162; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	for (i = 0; i < 25; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x6800);
+
+	PIPE_INIT(0x6c00);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0xbf800000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x6c00);
+
+	PIPE_INIT(0x7000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	for (i = 0; i < 35; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x7000);
+
+	PIPE_INIT(0x7400);
+	for (i = 0; i < 48; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x7400);
+
+	PIPE_INIT(0x7800);
+	for (i = 0; i < 48; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x7800);
+
+	PIPE_INIT(0x4400);
+	for (i = 0; i < 32; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x4400);
+
+	PIPE_INIT(0x0000);
+	for (i = 0; i < 16; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x0000);
+
+	PIPE_INIT(0x0040);
+	for (i = 0; i < 4; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x0040);
+
+#undef PIPE_INIT
+#undef PIPE_INIT_END
+#undef NV_WRITE_PIPE_INIT
+}
+
+static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) {
+		if (nv10_graph_ctx_regs[i] == reg)
+			return i;
+	}
+	NV_ERROR(dev, "unknow offset nv10_ctx_regs %d\n", reg);
+	return -1;
+}
+
+static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) {
+		if (nv17_graph_ctx_regs[i] == reg)
+			return i;
+	}
+	NV_ERROR(dev, "unknow offset nv17_ctx_regs %d\n", reg);
+	return -1;
+}
+
+int nv10_graph_load_context(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+	uint32_t tmp;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
+		nv_wr32(dev, nv10_graph_ctx_regs[i], pgraph_ctx->nv10[i]);
+	if (dev_priv->chipset >= 0x17) {
+		for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
+			nv_wr32(dev, nv17_graph_ctx_regs[i],
+						pgraph_ctx->nv17[i]);
+	}
+
+	nv10_graph_load_pipe(chan);
+
+	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
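+	/* The active channel ID lives in bits 31:24 of CTX_USER. */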
+	tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
+	nv_wr32(dev, NV10_PGRAPH_CTX_USER, (tmp & 0xffffff) | chan->id << 24);
+	tmp = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2);
+	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, tmp & 0xcfffffff);
+	return 0;
+}
+
+int
+nv10_graph_unload_context(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nouveau_channel *chan;
+	struct graph_state *ctx;
+	uint32_t tmp;
+	int i;
+
+	chan = pgraph->channel(dev);
+	if (!chan)
+		return 0;
+	ctx = chan->pgraph_ctx;
+
+	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
+		ctx->nv10[i] = nv_rd32(dev, nv10_graph_ctx_regs[i]);
+
+	if (dev_priv->chipset >= 0x17) {
+		for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
+			ctx->nv17[i] = nv_rd32(dev, nv17_graph_ctx_regs[i]);
+	}
+
+	nv10_graph_save_pipe(chan);
+
+	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
+	tmp  = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
+	tmp |= (pfifo->channels - 1) << 24;
+	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
+	return 0;
+}
+
+void
+nv10_graph_context_switch(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nouveau_channel *chan = NULL;
+	int chid;
+
+	pgraph->fifo_access(dev, false);
+	nouveau_wait_for_idle(dev);
+
+	/* If previous context is valid, we need to save it */
+	nv10_graph_unload_context(dev);
+
+	/* Load context for next channel */
+	chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
+	chan = dev_priv->fifos[chid];
+	if (chan)
+		nv10_graph_load_context(chan);
+
+	pgraph->fifo_access(dev, true);
+}
+
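+/* Stash a register value in the saved context image; the find_offset()
+ * helpers return -1 when the register isn't part of the context list,
+ * in which case the write is dropped. */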
+#define NV_WRITE_CTX(reg, val) do { \
+	int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \
+	if (offset >= 0) \
+		pgraph_ctx->nv10[offset] = val; \
+	} while (0)
+
+#define NV17_WRITE_CTX(reg, val) do { \
+	int offset = nv17_graph_ctx_regs_find_offset(dev, reg); \
+	if (offset >= 0) \
+		pgraph_ctx->nv17[offset] = val; \
+	} while (0)
+
+struct nouveau_channel *
+nv10_graph_channel(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int chid = dev_priv->engine.fifo.channels;
+
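+	/* Bit 16 of CTX_CONTROL appears to flag a valid channel context;
+	 * the owning channel ID sits in bits 31:24 of CTX_USER. */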
+	if (nv_rd32(dev, NV10_PGRAPH_CTX_CONTROL) & 0x00010000)
+		chid = nv_rd32(dev, NV10_PGRAPH_CTX_USER) >> 24;
+
+	if (chid >= dev_priv->engine.fifo.channels)
+		return NULL;
+
+	return dev_priv->fifos[chid];
+}
+
+int nv10_graph_create_context(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct graph_state *pgraph_ctx;
+
+	NV_DEBUG(dev, "nv10_graph_context_create %d\n", chan->id);
+
+	chan->pgraph_ctx = pgraph_ctx = kzalloc(sizeof(*pgraph_ctx),
+						GFP_KERNEL);
+	if (pgraph_ctx == NULL)
+		return -ENOMEM;
+
+	NV_WRITE_CTX(0x00400e88, 0x08000000);
+	NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
+	NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
+	NV_WRITE_CTX(0x00400e10, 0x00001000);
+	NV_WRITE_CTX(0x00400e14, 0x00001000);
+	NV_WRITE_CTX(0x00400e30, 0x00080008);
+	NV_WRITE_CTX(0x00400e34, 0x00080008);
+	if (dev_priv->chipset >= 0x17) {
+		/* is it really needed ??? */
+		NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
+					nv_rd32(dev, NV10_PGRAPH_DEBUG_4));
+		NV17_WRITE_CTX(0x004006b0, nv_rd32(dev, 0x004006b0));
+		NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
+		NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
+		NV17_WRITE_CTX(0x00400ec0, 0x00000080);
+		NV17_WRITE_CTX(0x00400ed0, 0x00000080);
+	}
+	NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24);
+
+	nv10_graph_create_pipe(chan);
+	return 0;
+}
+
+void nv10_graph_destroy_context(struct nouveau_channel *chan)
+{
+	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+
+	kfree(pgraph_ctx);
+	chan->pgraph_ctx = NULL;
+}
+
+int nv10_graph_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t tmp;
+	int i;
+
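+	/* Reset PGRAPH by pulsing its enable bit in PMC_ENABLE. */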
+	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
+			~NV_PMC_ENABLE_PGRAPH);
+	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
+			 NV_PMC_ENABLE_PGRAPH);
+
+	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
+	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
+	/* nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0x55DE0830 |
+				      (1<<29) |
+				      (1<<31));
+	if (dev_priv->chipset >= 0x17) {
+		nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x1f000000);
+		nv_wr32(dev, 0x004006b0, 0x40000020);
+	} else
+		nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
+
+	/* copy tile info from PFB */
+	for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
+		nv_wr32(dev, NV10_PGRAPH_TILE(i),
+					nv_rd32(dev, NV10_PFB_TILE(i)));
+		nv_wr32(dev, NV10_PGRAPH_TLIMIT(i),
+					nv_rd32(dev, NV10_PFB_TLIMIT(i)));
+		nv_wr32(dev, NV10_PGRAPH_TSIZE(i),
+					nv_rd32(dev, NV10_PFB_TSIZE(i)));
+		nv_wr32(dev, NV10_PGRAPH_TSTATUS(i),
+					nv_rd32(dev, NV10_PFB_TSTATUS(i)));
+	}
+
+	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH1, 0x00000000);
+	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH2, 0x00000000);
+	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH3, 0x00000000);
+	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH4, 0x00000000);
+	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
+
+	tmp  = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
+	tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
+	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
+	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
+
+	return 0;
+}
+
+void nv10_graph_takedown(struct drm_device *dev)
+{
+}
+
+struct nouveau_pgraph_object_class nv10_graph_grclass[] = {
+	{ 0x0030, false, NULL }, /* null */
+	{ 0x0039, false, NULL }, /* m2mf */
+	{ 0x004a, false, NULL }, /* gdirect */
+	{ 0x005f, false, NULL }, /* imageblit */
+	{ 0x009f, false, NULL }, /* imageblit (nv12) */
+	{ 0x008a, false, NULL }, /* ifc */
+	{ 0x0089, false, NULL }, /* sifm */
+	{ 0x0062, false, NULL }, /* surf2d */
+	{ 0x0043, false, NULL }, /* rop */
+	{ 0x0012, false, NULL }, /* beta1 */
+	{ 0x0072, false, NULL }, /* beta4 */
+	{ 0x0019, false, NULL }, /* cliprect */
+	{ 0x0044, false, NULL }, /* pattern */
+	{ 0x0052, false, NULL }, /* swzsurf */
+	{ 0x0093, false, NULL }, /* surf3d */
+	{ 0x0094, false, NULL }, /* tex_tri */
+	{ 0x0095, false, NULL }, /* multitex_tri */
+	{ 0x0056, false, NULL }, /* celsius (nv10) */
+	{ 0x0096, false, NULL }, /* celsius (nv11) */
+	{ 0x0099, false, NULL }, /* celsius (nv17) */
+	{}
+};
diff --git a/drivers/gpu/drm/nouveau/nv17_gpio.c b/drivers/gpu/drm/nouveau/nv17_gpio.c
new file mode 100644
index 0000000..2e58c33
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_gpio.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+
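+/* Map a DCB GPIO line to its control register, field shift and field
+ * mask: lines 0-1 live in NV_PCRTC_GPIO (16 bits apart), lines 2-9 in
+ * NV_PCRTC_GPIO_EXT and lines 10-13 in NV_PCRTC_850 (4 bits apart). */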
+static bool
+get_gpio_location(struct dcb_gpio_entry *ent, uint32_t *reg, uint32_t *shift,
+		  uint32_t *mask)
+{
+	if (ent->line < 2) {
+		*reg = NV_PCRTC_GPIO;
+		*shift = ent->line * 16;
+		*mask = 0x11;
+
+	} else if (ent->line < 10) {
+		*reg = NV_PCRTC_GPIO_EXT;
+		*shift = (ent->line - 2) * 4;
+		*mask = 0x3;
+
+	} else if (ent->line < 14) {
+		*reg = NV_PCRTC_850;
+		*shift = (ent->line - 10) * 4;
+		*mask = 0x3;
+
+	} else {
+		return false;
+	}
+
+	return true;
+}
+
+int
+nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
+{
+	struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
+	uint32_t reg, shift, mask, value;
+
+	if (!ent)
+		return -ENODEV;
+
+	if (!get_gpio_location(ent, &reg, &shift, &mask))
+		return -ENODEV;
+
+	value = NVReadCRTC(dev, 0, reg) >> shift;
+
+	return (ent->invert ? 1 : 0) ^ (value & 1);
+}
+
+int
+nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
+{
+	struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
+	uint32_t reg, shift, mask, value;
+
+	if (!ent)
+		return -ENODEV;
+
+	if (!get_gpio_location(ent, &reg, &shift, &mask))
+		return -ENODEV;
+
+	value = ((ent->invert ? 1 : 0) ^ (state ? 1 : 0)) << shift;
+	mask = ~(mask << shift);
+
+	NVWriteCRTC(dev, 0, reg, value | (NVReadCRTC(dev, 0, reg) & mask));
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
new file mode 100644
index 0000000..46cfd9c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -0,0 +1,681 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+#include "nouveau_drv.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_crtc.h"
+#include "nouveau_hw.h"
+#include "nv17_tv.h"
+
+enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
+					 struct drm_connector *connector,
+					 uint32_t pin_mask)
+{
+	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+
+	tv_enc->pin_mask = pin_mask >> 28 & 0xe;
+
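+	/* Each set bit corresponds to a load sensed on one of the TV DAC
+	 * output pins; the combination determines the subconnector type. */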
+	switch (tv_enc->pin_mask) {
+	case 0x2:
+	case 0x4:
+		tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Composite;
+		break;
+	case 0xc:
+		tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO;
+		break;
+	case 0xe:
+		if (nouveau_encoder(encoder)->dcb->tvconf.has_component_output)
+			tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Component;
+		else
+			tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SCART;
+		break;
+	default:
+		tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
+		break;
+	}
+
+	drm_connector_property_set_value(connector,
+			encoder->dev->mode_config.tv_subconnector_property,
+							tv_enc->subconnector);
+
+	return tv_enc->subconnector ? connector_status_connected :
+					connector_status_disconnected;
+}
+
+static const struct {
+	int hdisplay;
+	int vdisplay;
+} modes[] = {
+	{ 640, 400 },
+	{ 640, 480 },
+	{ 720, 480 },
+	{ 720, 576 },
+	{ 800, 600 },
+	{ 1024, 768 },
+	{ 1280, 720 },
+	{ 1280, 1024 },
+	{ 1920, 1080 }
+};
+
+static int nv17_tv_get_modes(struct drm_encoder *encoder,
+			     struct drm_connector *connector)
+{
+	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+	struct drm_display_mode *mode;
+	struct drm_display_mode *output_mode;
+	int n = 0;
+	int i;
+
+	if (tv_norm->kind != CTV_ENC_MODE) {
+		struct drm_display_mode *tv_mode;
+
+		for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) {
+			mode = drm_mode_duplicate(encoder->dev, tv_mode);
+
+			mode->clock = tv_norm->tv_enc_mode.vrefresh *
+						mode->htotal / 1000 *
+						mode->vtotal / 1000;
+
+			if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+				mode->clock *= 2;
+
+			if (mode->hdisplay == tv_norm->tv_enc_mode.hdisplay &&
+			    mode->vdisplay == tv_norm->tv_enc_mode.vdisplay)
+				mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+			drm_mode_probed_add(connector, mode);
+			n++;
+		}
+		return n;
+	}
+
+	/* tv_norm->kind == CTV_ENC_MODE */
+	output_mode = &tv_norm->ctv_enc_mode.mode;
+	for (i = 0; i < ARRAY_SIZE(modes); i++) {
+		if (modes[i].hdisplay > output_mode->hdisplay ||
+		    modes[i].vdisplay > output_mode->vdisplay)
+			continue;
+
+		if (modes[i].hdisplay == output_mode->hdisplay &&
+		    modes[i].vdisplay == output_mode->vdisplay) {
+			mode = drm_mode_duplicate(encoder->dev, output_mode);
+			mode->type |= DRM_MODE_TYPE_PREFERRED;
+		} else {
+			mode = drm_cvt_mode(encoder->dev, modes[i].hdisplay,
+				modes[i].vdisplay, 60, false,
+				output_mode->flags & DRM_MODE_FLAG_INTERLACE,
+				false);
+		}
+
+		/* CVT modes are sometimes unsuitable... */
+		if (output_mode->hdisplay <= 720 ||
+		    output_mode->hdisplay >= 1920) {
+			mode->htotal = output_mode->htotal;
+			mode->hsync_start = (mode->hdisplay + (mode->htotal
+					     - mode->hdisplay) * 9 / 10) & ~7;
+			mode->hsync_end = mode->hsync_start + 8;
+		}
+		if (output_mode->vdisplay >= 1024) {
+			mode->vtotal = output_mode->vtotal;
+			mode->vsync_start = output_mode->vsync_start;
+			mode->vsync_end = output_mode->vsync_end;
+		}
+
+		mode->type |= DRM_MODE_TYPE_DRIVER;
+		drm_mode_probed_add(connector, mode);
+		n++;
+	}
+	return n;
+}
+
+static int nv17_tv_mode_valid(struct drm_encoder *encoder,
+			      struct drm_display_mode *mode)
+{
+	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+
+	if (tv_norm->kind == CTV_ENC_MODE) {
+		struct drm_display_mode *output_mode =
+						&tv_norm->ctv_enc_mode.mode;
+
+		if (mode->clock > 400000)
+			return MODE_CLOCK_HIGH;
+
+		if (mode->hdisplay > output_mode->hdisplay ||
+		    mode->vdisplay > output_mode->vdisplay)
+			return MODE_BAD;
+
+		if ((mode->flags & DRM_MODE_FLAG_INTERLACE) !=
+		    (output_mode->flags & DRM_MODE_FLAG_INTERLACE))
+			return MODE_NO_INTERLACE;
+
+		if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+			return MODE_NO_DBLESCAN;
+
+	} else {
+		const int vsync_tolerance = 600;
+
+		if (mode->clock > 70000)
+			return MODE_CLOCK_HIGH;
+
+		if (abs(drm_mode_vrefresh(mode) * 1000 -
+			tv_norm->tv_enc_mode.vrefresh) > vsync_tolerance)
+			return MODE_VSYNC;
+
+		/* The encoder takes care of the actual interlacing */
+		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+			return MODE_NO_INTERLACE;
+	}
+
+	return MODE_OK;
+}
+
+static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
+{
+	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+
+	if (tv_norm->kind == CTV_ENC_MODE)
+		adjusted_mode->clock = tv_norm->ctv_enc_mode.mode.clock;
+	else
+		adjusted_mode->clock = 90000;
+
+	return true;
+}
+
+static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
+	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+
+	if (nouveau_encoder(encoder)->last_dpms == mode)
+		return;
+	nouveau_encoder(encoder)->last_dpms = mode;
+
+	NV_TRACE(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
+		 mode, nouveau_encoder(encoder)->dcb->index);
+
+	regs->ptv_200 &= ~1;
+
+	if (tv_norm->kind == CTV_ENC_MODE) {
+		nv04_dfp_update_fp_control(encoder, mode);
+
+	} else {
+		nv04_dfp_update_fp_control(encoder, DRM_MODE_DPMS_OFF);
+
+		if (mode == DRM_MODE_DPMS_ON)
+			regs->ptv_200 |= 1;
+	}
+
+	nv_load_ptv(dev, regs, 200);
+
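+	/* Power the TV DACs up or down through their GPIO lines. */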
+	nv17_gpio_set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
+	nv17_gpio_set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);
+
+	nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
+}
+
+static void nv17_tv_prepare(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+	int head = nouveau_crtc(encoder->crtc)->index;
+	uint8_t *cr_lcd = &dev_priv->mode_reg.crtc_reg[head].CRTC[
+							NV_CIO_CRE_LCD__INDEX];
+	uint32_t dacclk_off = NV_PRAMDAC_DACCLK +
+					nv04_dac_output_offset(encoder);
+	uint32_t dacclk;
+
+	helper->dpms(encoder, DRM_MODE_DPMS_OFF);
+
+	nv04_dfp_disable(dev, head);
+
+	/* Unbind any FP encoders from this head if we need the FP
+	 * stuff enabled. */
+	if (tv_norm->kind == CTV_ENC_MODE) {
+		struct drm_encoder *enc;
+
+		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+			struct dcb_entry *dcb = nouveau_encoder(enc)->dcb;
+
+			if ((dcb->type == OUTPUT_TMDS ||
+			     dcb->type == OUTPUT_LVDS) &&
+			     !enc->crtc &&
+			     nv04_dfp_get_bound_head(dev, dcb) == head) {
+				nv04_dfp_bind_head(dev, dcb, head ^ 1,
+						dev_priv->VBIOS.fp.dual_link);
+			}
+		}
+	}
+
+	/* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
+	 * at LCD__INDEX, which we don't alter.
+	 */
+	if (!(*cr_lcd & 0x44)) {
+		if (tv_norm->kind == CTV_ENC_MODE)
+			*cr_lcd = 0x1 | (head ? 0x0 : 0x8);
+		else
+			*cr_lcd = 0;
+	}
+
+	/* Set the DACCLK register */
+	dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1;
+
+	if (dev_priv->card_type == NV_40)
+		dacclk |= 0x1a << 16;
+
+	if (tv_norm->kind == CTV_ENC_MODE) {
+		dacclk |= 0x20;
+
+		if (head)
+			dacclk |= 0x100;
+		else
+			dacclk &= ~0x100;
+
+	} else {
+		dacclk |= 0x10;
+	}
+
+	NVWriteRAMDAC(dev, 0, dacclk_off, dacclk);
+}
+
+static void nv17_tv_mode_set(struct drm_encoder *encoder,
+			     struct drm_display_mode *drm_mode,
+			     struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int head = nouveau_crtc(encoder->crtc)->index;
+	struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head];
+	struct nv17_tv_state *tv_regs = &to_tv_enc(encoder)->state;
+	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+	int i;
+
+	regs->CRTC[NV_CIO_CRE_53] = 0x40; /* FP_HTIMING */
+	regs->CRTC[NV_CIO_CRE_54] = 0; /* FP_VTIMING */
+	regs->ramdac_630 = 0x2; /* turn off green mode (tv test pattern?) */
+	regs->tv_setup = 1;
+	regs->ramdac_8c0 = 0x0;
+
+	if (tv_norm->kind == TV_ENC_MODE) {
+		tv_regs->ptv_200 = 0x13111100;
+		if (head)
+			tv_regs->ptv_200 |= 0x10;
+
+		tv_regs->ptv_20c = 0x808010;
+		tv_regs->ptv_304 = 0x2d00000;
+		tv_regs->ptv_600 = 0x0;
+		tv_regs->ptv_60c = 0x0;
+		tv_regs->ptv_610 = 0x1e00000;
+
+		if (tv_norm->tv_enc_mode.vdisplay == 576) {
+			tv_regs->ptv_508 = 0x1200000;
+			tv_regs->ptv_614 = 0x33;
+
+		} else if (tv_norm->tv_enc_mode.vdisplay == 480) {
+			tv_regs->ptv_508 = 0xf00000;
+			tv_regs->ptv_614 = 0x13;
+		}
+
+		if (dev_priv->card_type >= NV_30) {
+			tv_regs->ptv_500 = 0xe8e0;
+			tv_regs->ptv_504 = 0x1710;
+			tv_regs->ptv_604 = 0x0;
+			tv_regs->ptv_608 = 0x0;
+		} else {
+			if (tv_norm->tv_enc_mode.vdisplay == 576) {
+				tv_regs->ptv_604 = 0x20;
+				tv_regs->ptv_608 = 0x10;
+				tv_regs->ptv_500 = 0x19710;
+				tv_regs->ptv_504 = 0x68f0;
+
+			} else if (tv_norm->tv_enc_mode.vdisplay == 480) {
+				tv_regs->ptv_604 = 0x10;
+				tv_regs->ptv_608 = 0x20;
+				tv_regs->ptv_500 = 0x4b90;
+				tv_regs->ptv_504 = 0x1b480;
+			}
+		}
+
+		for (i = 0; i < 0x40; i++)
+			tv_regs->tv_enc[i] = tv_norm->tv_enc_mode.tv_enc[i];
+
+	} else {
+		struct drm_display_mode *output_mode =
+						&tv_norm->ctv_enc_mode.mode;
+
+		/* The registers in PRAMDAC+0xc00 control some timings and CSC
+		 * parameters for the CTV encoder (it's only used for "HD" TV
+		 * modes; I don't have enough working to guess what they mean
+		 * exactly). It's probably connected at the output of the FP
+		 * encoder, but it also needs the analog encoder in its OR
+		 * enabled and routed to the head it's using. It's enabled
+		 * with bits [5:4] of the DACCLK register.
+		 */
+		for (i = 0; i < 38; i++)
+			regs->ctv_regs[i] = tv_norm->ctv_enc_mode.ctv_regs[i];
+
+		regs->fp_horiz_regs[FP_DISPLAY_END] = output_mode->hdisplay - 1;
+		regs->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
+		regs->fp_horiz_regs[FP_SYNC_START] =
+						output_mode->hsync_start - 1;
+		regs->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
+		regs->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay +
+			max((output_mode->hdisplay-600)/40 - 1, 1);
+
+		regs->fp_vert_regs[FP_DISPLAY_END] = output_mode->vdisplay - 1;
+		regs->fp_vert_regs[FP_TOTAL] = output_mode->vtotal - 1;
+		regs->fp_vert_regs[FP_SYNC_START] =
+						output_mode->vsync_start - 1;
+		regs->fp_vert_regs[FP_SYNC_END] = output_mode->vsync_end - 1;
+		regs->fp_vert_regs[FP_CRTC] = output_mode->vdisplay - 1;
+
+		regs->fp_control = NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
+			NV_PRAMDAC_FP_TG_CONTROL_READ_PROG |
+			NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12;
+
+		if (output_mode->flags & DRM_MODE_FLAG_PVSYNC)
+			regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS;
+		if (output_mode->flags & DRM_MODE_FLAG_PHSYNC)
+			regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS;
+
+		regs->fp_debug_0 = NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND |
+			NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND |
+			NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR |
+			NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR |
+			NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED |
+			NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE |
+			NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE;
+
+		regs->fp_debug_2 = 0;
+
+		regs->fp_margin_color = 0x801080;
+	}
+}
+
+static void nv17_tv_commit(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+
+	if (get_tv_norm(encoder)->kind == TV_ENC_MODE) {
+		nv17_tv_update_rescaler(encoder);
+		nv17_tv_update_properties(encoder);
+	} else {
+		nv17_ctv_update_rescaler(encoder);
+	}
+
+	nv17_tv_state_load(dev, &to_tv_enc(encoder)->state);
+
+	/* This could use refinement for flatpanels, but it should work */
+	if (dev_priv->chipset < 0x44)
+		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
+					nv04_dac_output_offset(encoder),
+					0xf0000000);
+	else
+		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
+					nv04_dac_output_offset(encoder),
+					0x00100000);
+
+	helper->dpms(encoder, DRM_MODE_DPMS_ON);
+
+	NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
+		drm_get_connector_name(
+			&nouveau_encoder_connector_get(nv_encoder)->base),
+		nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+}
+
+static void nv17_tv_save(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+
+	nouveau_encoder(encoder)->restore.output =
+					NVReadRAMDAC(dev, 0,
+					NV_PRAMDAC_DACCLK +
+					nv04_dac_output_offset(encoder));
+
+	nv17_tv_state_save(dev, &tv_enc->saved_state);
+
+	tv_enc->state.ptv_200 = tv_enc->saved_state.ptv_200;
+}
+
+static void nv17_tv_restore(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK +
+				nv04_dac_output_offset(encoder),
+				nouveau_encoder(encoder)->restore.output);
+
+	nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
+}
+
+static int nv17_tv_create_resources(struct drm_encoder *encoder,
+				    struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_mode_config *conf = &dev->mode_config;
+	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+	int num_tv_norms = dcb->tvconf.has_component_output ? NUM_TV_NORMS :
+							NUM_LD_TV_NORMS;
+	int i;
+
+	if (nouveau_tv_norm) {
+		for (i = 0; i < num_tv_norms; i++) {
+			if (!strcmp(nv17_tv_norm_names[i], nouveau_tv_norm)) {
+				tv_enc->tv_norm = i;
+				break;
+			}
+		}
+
+		if (i == num_tv_norms)
+			NV_WARN(dev, "Invalid TV norm setting \"%s\"\n",
+				nouveau_tv_norm);
+	}
+
+	drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names);
+
+	drm_connector_attach_property(connector,
+					conf->tv_select_subconnector_property,
+					tv_enc->select_subconnector);
+	drm_connector_attach_property(connector,
+					conf->tv_subconnector_property,
+					tv_enc->subconnector);
+	drm_connector_attach_property(connector,
+					conf->tv_mode_property,
+					tv_enc->tv_norm);
+	drm_connector_attach_property(connector,
+					conf->tv_flicker_reduction_property,
+					tv_enc->flicker);
+	drm_connector_attach_property(connector,
+					conf->tv_saturation_property,
+					tv_enc->saturation);
+	drm_connector_attach_property(connector,
+					conf->tv_hue_property,
+					tv_enc->hue);
+	drm_connector_attach_property(connector,
+					conf->tv_overscan_property,
+					tv_enc->overscan);
+
+	return 0;
+}
+
+static int nv17_tv_set_property(struct drm_encoder *encoder,
+				struct drm_connector *connector,
+				struct drm_property *property,
+				uint64_t val)
+{
+	struct drm_mode_config *conf = &encoder->dev->mode_config;
+	struct drm_crtc *crtc = encoder->crtc;
+	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+	bool modes_changed = false;
+
+	if (property == conf->tv_overscan_property) {
+		tv_enc->overscan = val;
+		if (encoder->crtc) {
+			if (tv_norm->kind == CTV_ENC_MODE)
+				nv17_ctv_update_rescaler(encoder);
+			else
+				nv17_tv_update_rescaler(encoder);
+		}
+
+	} else if (property == conf->tv_saturation_property) {
+		if (tv_norm->kind != TV_ENC_MODE)
+			return -EINVAL;
+
+		tv_enc->saturation = val;
+		nv17_tv_update_properties(encoder);
+
+	} else if (property == conf->tv_hue_property) {
+		if (tv_norm->kind != TV_ENC_MODE)
+			return -EINVAL;
+
+		tv_enc->hue = val;
+		nv17_tv_update_properties(encoder);
+
+	} else if (property == conf->tv_flicker_reduction_property) {
+		if (tv_norm->kind != TV_ENC_MODE)
+			return -EINVAL;
+
+		tv_enc->flicker = val;
+		if (encoder->crtc)
+			nv17_tv_update_rescaler(encoder);
+
+	} else if (property == conf->tv_mode_property) {
+		if (connector->dpms != DRM_MODE_DPMS_OFF)
+			return -EINVAL;
+
+		tv_enc->tv_norm = val;
+
+		modes_changed = true;
+
+	} else if (property == conf->tv_select_subconnector_property) {
+		if (tv_norm->kind != TV_ENC_MODE)
+			return -EINVAL;
+
+		tv_enc->select_subconnector = val;
+		nv17_tv_update_properties(encoder);
+
+	} else {
+		return -EINVAL;
+	}
+
+	if (modes_changed) {
+		drm_helper_probe_single_connector_modes(connector, 0, 0);
+
+		/* Disable the crtc to ensure a full modeset is
+		 * performed whenever it's turned on again. */
+		if (crtc) {
+			struct drm_mode_set modeset = {
+				.crtc = crtc,
+			};
+
+			crtc->funcs->set_config(&modeset);
+		}
+	}
+
+	return 0;
+}
+
+static void nv17_tv_destroy(struct drm_encoder *encoder)
+{
+	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+
+	NV_DEBUG(encoder->dev, "\n");
+
+	drm_encoder_cleanup(encoder);
+	kfree(tv_enc);
+}
+
+static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = {
+	.dpms = nv17_tv_dpms,
+	.save = nv17_tv_save,
+	.restore = nv17_tv_restore,
+	.mode_fixup = nv17_tv_mode_fixup,
+	.prepare = nv17_tv_prepare,
+	.commit = nv17_tv_commit,
+	.mode_set = nv17_tv_mode_set,
+	.detect = nv17_dac_detect,
+};
+
+static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = {
+	.get_modes = nv17_tv_get_modes,
+	.mode_valid = nv17_tv_mode_valid,
+	.create_resources = nv17_tv_create_resources,
+	.set_property = nv17_tv_set_property,
+};
+
+static struct drm_encoder_funcs nv17_tv_funcs = {
+	.destroy = nv17_tv_destroy,
+};
+
+int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry)
+{
+	struct drm_encoder *encoder;
+	struct nv17_tv_encoder *tv_enc = NULL;
+
+	tv_enc = kzalloc(sizeof(*tv_enc), GFP_KERNEL);
+	if (!tv_enc)
+		return -ENOMEM;
+
+	tv_enc->overscan = 50;
+	tv_enc->flicker = 50;
+	tv_enc->saturation = 50;
+	tv_enc->hue = 0;
+	tv_enc->tv_norm = TV_NORM_PAL;
+	tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
+	tv_enc->select_subconnector = DRM_MODE_SUBCONNECTOR_Automatic;
+	tv_enc->pin_mask = 0;
+
+	encoder = to_drm_encoder(&tv_enc->base);
+
+	tv_enc->base.dcb = entry;
+	tv_enc->base.or = ffs(entry->or) - 1;
+
+	drm_encoder_init(dev, encoder, &nv17_tv_funcs, DRM_MODE_ENCODER_TVDAC);
+	drm_encoder_helper_add(encoder, &nv17_tv_helper_funcs);
+	to_encoder_slave(encoder)->slave_funcs = &nv17_tv_slave_funcs;
+
+	encoder->possible_crtcs = entry->heads;
+	encoder->possible_clones = 0;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.h b/drivers/gpu/drm/nouveau/nv17_tv.h
new file mode 100644
index 0000000..c00977c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_tv.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NV17_TV_H__
+#define __NV17_TV_H__
+
+struct nv17_tv_state {
+	uint8_t tv_enc[0x40];
+
+	uint32_t hfilter[4][7];
+	uint32_t hfilter2[4][7];
+	uint32_t vfilter[4][7];
+
+	uint32_t ptv_200;
+	uint32_t ptv_204;
+	uint32_t ptv_208;
+	uint32_t ptv_20c;
+	uint32_t ptv_304;
+	uint32_t ptv_500;
+	uint32_t ptv_504;
+	uint32_t ptv_508;
+	uint32_t ptv_600;
+	uint32_t ptv_604;
+	uint32_t ptv_608;
+	uint32_t ptv_60c;
+	uint32_t ptv_610;
+	uint32_t ptv_614;
+};
+
+enum nv17_tv_norm {
+	TV_NORM_PAL,
+	TV_NORM_PAL_M,
+	TV_NORM_PAL_N,
+	TV_NORM_PAL_NC,
+	TV_NORM_NTSC_M,
+	TV_NORM_NTSC_J,
+	NUM_LD_TV_NORMS,
+	TV_NORM_HD480I = NUM_LD_TV_NORMS,
+	TV_NORM_HD480P,
+	TV_NORM_HD576I,
+	TV_NORM_HD576P,
+	TV_NORM_HD720P,
+	TV_NORM_HD1080I,
+	NUM_TV_NORMS
+};
+
+struct nv17_tv_encoder {
+	struct nouveau_encoder base;
+
+	struct nv17_tv_state state;
+	struct nv17_tv_state saved_state;
+
+	int overscan;
+	int flicker;
+	int saturation;
+	int hue;
+	enum nv17_tv_norm tv_norm;
+	int subconnector;
+	int select_subconnector;
+	uint32_t pin_mask;
+};
+#define to_tv_enc(x) container_of(nouveau_encoder(x),		\
+				  struct nv17_tv_encoder, base)
+
+extern char *nv17_tv_norm_names[NUM_TV_NORMS];
+
+extern struct nv17_tv_norm_params {
+	enum {
+		TV_ENC_MODE,
+		CTV_ENC_MODE,
+	} kind;
+
+	union {
+		struct {
+			int hdisplay;
+			int vdisplay;
+			int vrefresh; /* mHz */
+
+			uint8_t tv_enc[0x40];
+		} tv_enc_mode;
+
+		struct {
+			struct drm_display_mode mode;
+
+			uint32_t ctv_regs[38];
+		} ctv_enc_mode;
+	};
+
+} nv17_tv_norms[NUM_TV_NORMS];
+#define get_tv_norm(enc) (&nv17_tv_norms[to_tv_enc(enc)->tv_norm])
+
+extern struct drm_display_mode nv17_tv_modes[];
+
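+/* Piecewise-linear interpolation: x = 0, 50 and 100 map to y0, y1 and
+ * y2 respectively, with straight segments in between. */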
+static inline int interpolate(int y0, int y1, int y2, int x)
+{
+	return y1 + (x < 50 ? y1 - y0 : y2 - y1) * (x - 50) / 50;
+}
+
+void nv17_tv_state_save(struct drm_device *dev, struct nv17_tv_state *state);
+void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state);
+void nv17_tv_update_properties(struct drm_encoder *encoder);
+void nv17_tv_update_rescaler(struct drm_encoder *encoder);
+void nv17_ctv_update_rescaler(struct drm_encoder *encoder);
+
+/* TV hardware access functions */
+
+static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg, uint32_t val)
+{
+	nv_wr32(dev, reg, val);
+}
+
+static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg)
+{
+	return nv_rd32(dev, reg);
+}
+
+static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg, uint8_t val)
+{
+	nv_write_ptv(dev, NV_PTV_TV_INDEX, reg);
+	nv_write_ptv(dev, NV_PTV_TV_DATA, val);
+}
+
+static inline uint8_t nv_read_tv_enc(struct drm_device *dev, uint8_t reg)
+{
+	nv_write_ptv(dev, NV_PTV_TV_INDEX, reg);
+	return nv_read_ptv(dev, NV_PTV_TV_DATA);
+}
+
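+/* 'reg' is token-pasted into both the register offset (0x##reg) and the
+ * state field name, so e.g. nv_load_ptv(dev, state, 200) writes
+ * state->ptv_200 to NV_PTV_OFFSET + 0x200. */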
+#define nv_load_ptv(dev, state, reg) nv_write_ptv(dev, NV_PTV_OFFSET + 0x##reg, state->ptv_##reg)
+#define nv_save_ptv(dev, state, reg) state->ptv_##reg = nv_read_ptv(dev, NV_PTV_OFFSET + 0x##reg)
+#define nv_load_tv_enc(dev, state, reg) nv_write_tv_enc(dev, 0x##reg, state->tv_enc[0x##reg])
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nv17_tv_modes.c b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
new file mode 100644
index 0000000..d64683d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
@@ -0,0 +1,583 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+#include "nouveau_drv.h"
+#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
+#include "nouveau_hw.h"
+#include "nv17_tv.h"
+
+char *nv17_tv_norm_names[NUM_TV_NORMS] = {
+	[TV_NORM_PAL] = "PAL",
+	[TV_NORM_PAL_M] = "PAL-M",
+	[TV_NORM_PAL_N] = "PAL-N",
+	[TV_NORM_PAL_NC] = "PAL-Nc",
+	[TV_NORM_NTSC_M] = "NTSC-M",
+	[TV_NORM_NTSC_J] = "NTSC-J",
+	[TV_NORM_HD480I] = "hd480i",
+	[TV_NORM_HD480P] = "hd480p",
+	[TV_NORM_HD576I] = "hd576i",
+	[TV_NORM_HD576P] = "hd576p",
+	[TV_NORM_HD720P] = "hd720p",
+	[TV_NORM_HD1080I] = "hd1080i"
+};
+
+/* TV standard specific parameters */
+
+struct nv17_tv_norm_params nv17_tv_norms[NUM_TV_NORMS] = {
+	[TV_NORM_PAL] = { TV_ENC_MODE, {
+			.tv_enc_mode = { 720, 576, 50000, {
+					0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
+					0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
+					0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
+					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
+					0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
+					0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
+					0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
+					0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
+				} } } },
+
+	[TV_NORM_PAL_M] = { TV_ENC_MODE, {
+			.tv_enc_mode = { 720, 480, 59940, {
+					0x21, 0xe6, 0xef, 0xe3, 0x0, 0x0, 0xb, 0x18,
+					0x7e, 0x44, 0x76, 0x32, 0x25, 0x0, 0x3c, 0x0,
+					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
+					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
+					0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
+					0x0, 0x18, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
+					0x0, 0xb4, 0x0, 0x15, 0x40, 0x10, 0x0, 0x9c,
+					0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
+				} } } },
+
+	[TV_NORM_PAL_N] = { TV_ENC_MODE, {
+			.tv_enc_mode = { 720, 576, 50000, {
+					0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
+					0x7e, 0x40, 0x8a, 0x32, 0x25, 0x0, 0x3c, 0x0,
+					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
+					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
+					0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
+					0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
+					0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
+					0xbd, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
+				} } } },
+
+	[TV_NORM_PAL_NC] = { TV_ENC_MODE, {
+			.tv_enc_mode = { 720, 576, 50000, {
+					0x21, 0xf6, 0x94, 0x46, 0x0, 0x0, 0xb, 0x18,
+					0x7e, 0x44, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
+					0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
+					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
+					0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
+					0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
+					0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
+					0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
+				} } } },
+
+	[TV_NORM_NTSC_M] = { TV_ENC_MODE, {
+			.tv_enc_mode = { 720, 480, 59940, {
+					0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
+					0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x3c, 0x0,
+					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
+					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
+					0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
+					0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
+					0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0x9c,
+					0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
+				} } } },
+
+	[TV_NORM_NTSC_J] = { TV_ENC_MODE, {
+			.tv_enc_mode = { 720, 480, 59940, {
+					0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
+					0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0,
+					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
+					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
+					0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5,
+					0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
+					0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4,
+					0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
+				} } } },
+
+	[TV_NORM_HD480I] = { TV_ENC_MODE, {
+			.tv_enc_mode = { 720, 480, 59940, {
+					0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
+					0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0,
+					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
+					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
+					0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5,
+					0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
+					0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4,
+					0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
+				} } } },
+
+	[TV_NORM_HD576I] = { TV_ENC_MODE, {
+			.tv_enc_mode = { 720, 576, 50000, {
+					0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
+					0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
+					0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
+					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
+					0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
+					0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
+					0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
+					0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
+				} } } },
+
+
+	[TV_NORM_HD480P] = { CTV_ENC_MODE, {
+			.ctv_enc_mode = {
+				.mode = { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000,
+						   720, 735, 743, 858, 0, 480, 490, 494, 525, 0,
+						   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+				.ctv_regs = { 0x3540000, 0x0, 0x0, 0x314,
+					      0x354003a, 0x40000, 0x6f0344, 0x18100000,
+					      0x10160004, 0x10060005, 0x1006000c, 0x10060020,
+					      0x10060021, 0x140e0022, 0x10060202, 0x1802020a,
+					      0x1810020b, 0x10000fff, 0x10000fff, 0x10000fff,
+					      0x10000fff, 0x10000fff, 0x10000fff, 0x70,
+					      0x3ff0000, 0x57, 0x2e001e, 0x258012c,
+					      0xa0aa04ec, 0x30, 0x80960019, 0x12c0300,
+					      0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400
+				} } } },
+
+	[TV_NORM_HD576P] = { CTV_ENC_MODE, {
+			.ctv_enc_mode = {
+				.mode = { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000,
+						   720, 730, 738, 864, 0, 576, 581, 585, 625, 0,
+						   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+				.ctv_regs = { 0x3540000, 0x0, 0x0, 0x314,
+					      0x354003a, 0x40000, 0x6f0344, 0x18100000,
+					      0x10060001, 0x10060009, 0x10060026, 0x10060027,
+					      0x140e0028, 0x10060268, 0x1810026d, 0x10000fff,
+					      0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff,
+					      0x10000fff, 0x10000fff, 0x10000fff, 0x69,
+					      0x3ff0000, 0x57, 0x2e001e, 0x258012c,
+					      0xa0aa04ec, 0x30, 0x80960019, 0x12c0300,
+					      0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400
+				} } } },
+
+	[TV_NORM_HD720P] = { CTV_ENC_MODE, {
+			.ctv_enc_mode = {
+				.mode = { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250,
+						   1280, 1349, 1357, 1650, 0, 720, 725, 730, 750, 0,
+						   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+				.ctv_regs = { 0x1260394, 0x0, 0x0, 0x622,
+					      0x66b0021, 0x6004a, 0x1210626, 0x8170000,
+					      0x70004, 0x70016, 0x70017, 0x40f0018,
+					      0x702e8, 0x81702ed, 0xfff, 0xfff,
+					      0xfff, 0xfff, 0xfff, 0xfff,
+					      0xfff, 0xfff, 0xfff, 0x0,
+					      0x2e40001, 0x58, 0x2e001e, 0x258012c,
+					      0xa0aa04ec, 0x30, 0x810c0039, 0x12c0300,
+					      0xc0002039, 0x600, 0x32060039, 0x0, 0x0, 0x0
+				} } } },
+
+	[TV_NORM_HD1080I] = { CTV_ENC_MODE, {
+			.ctv_enc_mode = {
+				.mode = { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250,
+						   1920, 1961, 2049, 2200, 0, 1080, 1084, 1088, 1125, 0,
+						   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC
+						   | DRM_MODE_FLAG_INTERLACE) },
+				.ctv_regs = { 0xac0420, 0x44c0478, 0x4a4, 0x4fc0868,
+					      0x8940028, 0x60054, 0xe80870, 0xbf70000,
+					      0xbc70004, 0x70005, 0x70012, 0x70013,
+					      0x40f0014, 0x70230, 0xbf70232, 0xbf70233,
+					      0x1c70237, 0x70238, 0x70244, 0x70245,
+					      0x40f0246, 0x70462, 0x1f70464, 0x0,
+					      0x2e40001, 0x58, 0x2e001e, 0x258012c,
+					      0xa0aa04ec, 0x30, 0x815f004c, 0x12c0300,
+					      0xc000204c, 0x600, 0x3206004c, 0x0, 0x0, 0x0
+				} } } }
+};
+
+/*
+ * The following is some guesswork on how the TV encoder flicker
+ * filter/rescaler works:
+ *
+ * It seems to use some sort of resampling filter. It is controlled
+ * through the registers at NV_PTV_HFILTER and NV_PTV_VFILTER, which
+ * control the horizontal and vertical stages respectively. There is
+ * also NV_PTV_HFILTER2, which the blob fills identically to
+ * NV_PTV_HFILTER but which seems to do nothing; a rough guess is that
+ * it could be used to control the filtering of each interlaced field
+ * independently, but I don't know how it is enabled. The whole
+ * filtering process seems to be disabled with bits 27:26 of PTV_200,
+ * but we aren't doing that.
+ *
+ * The layout of both register sets is the same:
+ *
+ * A: [BASE+0x18]...[BASE+0x0] [BASE+0x58]..[BASE+0x40]
+ * B: [BASE+0x34]...[BASE+0x1c] [BASE+0x74]..[BASE+0x5c]
+ *
+ * Each coefficient is stored in bits [31],[15:9] in two's complement
+ * format. They seem to be some kind of weights used in a low-pass
+ * filter. Both A and B coefficients are applied to the 14 nearest
+ * samples on each side (listed from nearest to furthest; they
+ * roughly cover 2 framebuffer pixels on each side). They are
+ * probably multiplied with some more hardwired weights before being
+ * used: B-coefficients are applied the same on both sides, while
+ * A-coefficients are inverted before being applied to the opposite
+ * side.
+ *
+ * After all the hassle, I got the following formula by empirical
+ * means...
+ */
+
+#define calc_overscan(o) interpolate(0x100, 0xe1, 0xc1, o)
+
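+/* Fixed-point units used to scale the empirical filter coefficients
+ * below (note the jump from 1 << 32 to 1 << 48 for id5). */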
+#define id1 (1LL << 8)
+#define id2 (1LL << 16)
+#define id3 (1LL << 24)
+#define id4 (1LL << 32)
+#define id5 (1LL << 48)
+
+static struct filter_params {
+	int64_t k1;
+	int64_t ki;
+	int64_t ki2;
+	int64_t ki3;
+	int64_t kr;
+	int64_t kir;
+	int64_t ki2r;
+	int64_t ki3r;
+	int64_t kf;
+	int64_t kif;
+	int64_t ki2f;
+	int64_t ki3f;
+	int64_t krf;
+	int64_t kirf;
+	int64_t ki2rf;
+	int64_t ki3rf;
+} fparams[2][4] = {
+	/* Horizontal filter parameters */
+	{
+		{64.311690 * id5, -39.516924 * id5, 6.586143 * id5, 0.000002 * id5,
+		 0.051285 * id4, 26.168746 * id4, -4.361449 * id4, -0.000001 * id4,
+		 9.308169 * id3, 78.180965 * id3, -13.030158 * id3, -0.000001 * id3,
+		 -8.801540 * id1, -46.572890 * id1, 7.762145 * id1, -0.000000 * id1},
+		{-44.565569 * id5, -68.081246 * id5, 39.812074 * id5, -4.009316 * id5,
+		 29.832207 * id4, 50.047322 * id4, -25.380017 * id4, 2.546422 * id4,
+		 104.605622 * id3, 141.908641 * id3, -74.322319 * id3, 7.484316 * id3,
+		 -37.081621 * id1, -90.397510 * id1, 42.784229 * id1, -4.289952 * id1},
+		{-56.793244 * id5, 31.153584 * id5, -5.192247 * id5, -0.000003 * id5,
+		 33.541131 * id4, -34.149302 * id4, 5.691537 * id4, 0.000002 * id4,
+		 87.196610 * id3, -88.995169 * id3, 14.832456 * id3, 0.000012 * id3,
+		 17.288138 * id1, 71.864786 * id1, -11.977408 * id1, -0.000009 * id1},
+		{51.787796 * id5, 21.211771 * id5, -18.993730 * id5, 1.853310 * id5,
+		 -41.470726 * id4, -17.775823 * id4, 13.057821 * id4, -1.15823 * id4,
+		 -154.235673 * id3, -44.878641 * id3, 40.656077 * id3, -3.695595 * id3,
+		 112.201065 * id1, 39.992155 * id1, -25.155714 * id1, 2.113984 * id1},
+	},
+
+	/* Vertical filter parameters */
+	{
+		{67.601979 * id5, 0.428319 * id5, -0.071318 * id5, -0.000012 * id5,
+		 -3.402339 * id4, 0.000209 * id4, -0.000092 * id4, 0.000010 * id4,
+		 -9.180996 * id3, 6.111270 * id3, -1.024457 * id3, 0.001043 * id3,
+		 6.060315 * id1, -0.017425 * id1, 0.007830 * id1, -0.000869 * id1},
+		{6.755647 * id5, 5.841348 * id5, 1.469734 * id5, -0.149656 * id5,
+		 8.293120 * id4, -1.192888 * id4, -0.947652 * id4, 0.094507 * id4,
+		 37.526655 * id3, 10.257875 * id3, -10.823275 * id3, 1.081497 * id3,
+		 -2.361928 * id1, -2.059432 * id1, 1.840671 * id1, -0.168100 * id1},
+		{-14.780391 * id5, -16.042148 * id5, 2.673692 * id5, -0.000000 * id5,
+		 39.541978 * id4, 5.680053 * id4, -0.946676 * id4, 0.000000 * id4,
+		 152.994486 * id3, 12.625439 * id3, -2.119579 * id3, 0.002708 * id3,
+		 -38.125089 * id1, -0.855880 * id1, 0.155359 * id1, -0.002245 * id1},
+		{-27.476193 * id5, -1.454976 * id5, 1.286557 * id5, 0.025346 * id5,
+		 20.687300 * id4, 3.014003 * id4, -0.557786 * id4, -0.01311 * id4,
+		 60.008737 * id3, -0.738273 * id3, 5.408217 * id3, -0.796798 * id3,
+		 -17.296835 * id1, 4.438577 * id1, -2.809420 * id1, 0.385491 * id1},
+	}
+};
+
+static void tv_setup_filter(struct drm_encoder *encoder)
+{
+	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+	struct drm_display_mode *mode = &encoder->crtc->mode;
+	uint32_t (*filters[])[4][7] = {&tv_enc->state.hfilter,
+				       &tv_enc->state.vfilter};
+	int i, j, k;
+	int32_t overscan = calc_overscan(tv_enc->overscan);
+	int64_t flicker = (tv_enc->flicker - 50) * (id3 / 100);
+	uint64_t rs[] = {mode->hdisplay * id3,
+			 mode->vdisplay * id3};
+
+	do_div(rs[0], overscan * tv_norm->tv_enc_mode.hdisplay);
+	do_div(rs[1], overscan * tv_norm->tv_enc_mode.vdisplay);
+
+	for (k = 0; k < 2; k++) {
+		rs[k] = max((int64_t)rs[k], id2);
+
+		for (j = 0; j < 4; j++) {
+			struct filter_params *p = &fparams[k][j];
+
+			for (i = 0; i < 7; i++) {
+				int64_t c = (p->k1 + p->ki*i + p->ki2*i*i + p->ki3*i*i*i)
+					+ (p->kr + p->kir*i + p->ki2r*i*i + p->ki3r*i*i*i)*rs[k]
+					+ (p->kf + p->kif*i + p->ki2f*i*i + p->ki3f*i*i*i)*flicker
+					+ (p->krf + p->kirf*i + p->ki2rf*i*i + p->ki3rf*i*i*i)*flicker*rs[k];
+
+				(*filters[k])[j][i] = (c + id5/2) >> 39 & (0x1 << 31 | 0x7f << 9);
+			}
+		}
+	}
+}
+
+/* Hardware state saving/restoring */
+
+static void tv_save_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7])
+{
+	int i, j;
+	uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
+
+	for (i = 0; i < 4; i++) {
+		for (j = 0; j < 7; j++)
+			regs[i][j] = nv_read_ptv(dev, offsets[i]+4*j);
+	}
+}
+
+static void tv_load_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7])
+{
+	int i, j;
+	uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
+
+	for (i = 0; i < 4; i++) {
+		for (j = 0; j < 7; j++)
+			nv_write_ptv(dev, offsets[i]+4*j, regs[i][j]);
+	}
+}
+
+void nv17_tv_state_save(struct drm_device *dev, struct nv17_tv_state *state)
+{
+	int i;
+
+	for (i = 0; i < 0x40; i++)
+		state->tv_enc[i] = nv_read_tv_enc(dev, i);
+
+	tv_save_filter(dev, NV_PTV_HFILTER, state->hfilter);
+	tv_save_filter(dev, NV_PTV_HFILTER2, state->hfilter2);
+	tv_save_filter(dev, NV_PTV_VFILTER, state->vfilter);
+
+	nv_save_ptv(dev, state, 200);
+	nv_save_ptv(dev, state, 204);
+	nv_save_ptv(dev, state, 208);
+	nv_save_ptv(dev, state, 20c);
+	nv_save_ptv(dev, state, 304);
+	nv_save_ptv(dev, state, 500);
+	nv_save_ptv(dev, state, 504);
+	nv_save_ptv(dev, state, 508);
+	nv_save_ptv(dev, state, 600);
+	nv_save_ptv(dev, state, 604);
+	nv_save_ptv(dev, state, 608);
+	nv_save_ptv(dev, state, 60c);
+	nv_save_ptv(dev, state, 610);
+	nv_save_ptv(dev, state, 614);
+}
+
+void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state)
+{
+	int i;
+
+	for (i = 0; i < 0x40; i++)
+		nv_write_tv_enc(dev, i, state->tv_enc[i]);
+
+	tv_load_filter(dev, NV_PTV_HFILTER, state->hfilter);
+	tv_load_filter(dev, NV_PTV_HFILTER2, state->hfilter2);
+	tv_load_filter(dev, NV_PTV_VFILTER, state->vfilter);
+
+	nv_load_ptv(dev, state, 200);
+	nv_load_ptv(dev, state, 204);
+	nv_load_ptv(dev, state, 208);
+	nv_load_ptv(dev, state, 20c);
+	nv_load_ptv(dev, state, 304);
+	nv_load_ptv(dev, state, 500);
+	nv_load_ptv(dev, state, 504);
+	nv_load_ptv(dev, state, 508);
+	nv_load_ptv(dev, state, 600);
+	nv_load_ptv(dev, state, 604);
+	nv_load_ptv(dev, state, 608);
+	nv_load_ptv(dev, state, 60c);
+	nv_load_ptv(dev, state, 610);
+	nv_load_ptv(dev, state, 614);
+
+	/* This is required for some settings to kick in. */
+	nv_write_tv_enc(dev, 0x3e, 1);
+	nv_write_tv_enc(dev, 0x3e, 0);
+}
+
+/* Timings similar to the ones the blob sets */
+
+struct drm_display_mode nv17_tv_modes[] = {
+	{ DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 0,
+		   320, 344, 392, 560, 0, 200, 200, 202, 220, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC
+		   | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
+	{ DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 0,
+		   320, 344, 392, 560, 0, 240, 240, 246, 263, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC
+		   | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
+	{ DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 0,
+		   400, 432, 496, 640, 0, 300, 300, 303, 314, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC
+		   | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
+	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 0,
+		   640, 672, 768, 880, 0, 480, 480, 492, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 0,
+		   720, 752, 872, 960, 0, 480, 480, 493, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 0,
+		   720, 776, 856, 960, 0, 576, 576, 588, 597, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 0,
+		   800, 840, 920, 1040, 0, 600, 600, 604, 618, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 0,
+		   1024, 1064, 1200, 1344, 0, 768, 768, 777, 806, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	{}
+};
+
+void nv17_tv_update_properties(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+	struct nv17_tv_state *regs = &tv_enc->state;
+	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+	int subconnector = tv_enc->select_subconnector ?
+						tv_enc->select_subconnector :
+						tv_enc->subconnector;
+
+	switch (subconnector) {
+	case DRM_MODE_SUBCONNECTOR_Composite:
+	{
+		regs->ptv_204 = 0x2;
+
+		/* The composite connector may be found on either pin. */
+		if (tv_enc->pin_mask & 0x4)
+			regs->ptv_204 |= 0x010000;
+		else if (tv_enc->pin_mask & 0x2)
+			regs->ptv_204 |= 0x100000;
+		else
+			regs->ptv_204 |= 0x110000;
+
+		regs->tv_enc[0x7] = 0x10;
+		break;
+	}
+	case DRM_MODE_SUBCONNECTOR_SVIDEO:
+		regs->ptv_204 = 0x11012;
+		regs->tv_enc[0x7] = 0x18;
+		break;
+
+	case DRM_MODE_SUBCONNECTOR_Component:
+		regs->ptv_204 = 0x111333;
+		regs->tv_enc[0x7] = 0x14;
+		break;
+
+	case DRM_MODE_SUBCONNECTOR_SCART:
+		regs->ptv_204 = 0x111012;
+		regs->tv_enc[0x7] = 0x18;
+		break;
+	}
+
+	regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20], 255,
+					 tv_enc->saturation);
+	regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22], 255,
+					 tv_enc->saturation);
+	regs->tv_enc[0x25] = tv_enc->hue * 255 / 100;
+
+	nv_load_ptv(dev, regs, 204);
+	nv_load_tv_enc(dev, regs, 7);
+	nv_load_tv_enc(dev, regs, 20);
+	nv_load_tv_enc(dev, regs, 22);
+	nv_load_tv_enc(dev, regs, 25);
+}
+
+void nv17_tv_update_rescaler(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+	struct nv17_tv_state *regs = &tv_enc->state;
+
+	regs->ptv_208 = 0x40 | (calc_overscan(tv_enc->overscan) << 8);
+
+	tv_setup_filter(encoder);
+
+	nv_load_ptv(dev, regs, 208);
+	tv_load_filter(dev, NV_PTV_HFILTER, regs->hfilter);
+	tv_load_filter(dev, NV_PTV_HFILTER2, regs->hfilter2);
+	tv_load_filter(dev, NV_PTV_VFILTER, regs->vfilter);
+}
+
+void nv17_ctv_update_rescaler(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+	int head = nouveau_crtc(encoder->crtc)->index;
+	struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head];
+	struct drm_display_mode *crtc_mode = &encoder->crtc->mode;
+	struct drm_display_mode *output_mode = &get_tv_norm(encoder)->ctv_enc_mode.mode;
+	int overscan, hmargin, vmargin, hratio, vratio;
+
+	/* The rescaler doesn't do the right thing for interlaced modes. */
+	if (output_mode->flags & DRM_MODE_FLAG_INTERLACE)
+		overscan = 100;
+	else
+		overscan = tv_enc->overscan;
+
+	hmargin = (output_mode->hdisplay - crtc_mode->hdisplay) / 2;
+	vmargin = (output_mode->vdisplay - crtc_mode->vdisplay) / 2;
+
+	hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20), hmargin,
+			      overscan);
+	vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20), vmargin,
+			      overscan);
+
+	hratio = crtc_mode->hdisplay * 0x800 / (output_mode->hdisplay - 2*hmargin);
+	vratio = crtc_mode->vdisplay * 0x800 / (output_mode->vdisplay - 2*vmargin) & ~3;
+
+	regs->fp_horiz_regs[FP_VALID_START] = hmargin;
+	regs->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - hmargin - 1;
+	regs->fp_vert_regs[FP_VALID_START] = vmargin;
+	regs->fp_vert_regs[FP_VALID_END] = output_mode->vdisplay - vmargin - 1;
+
+	regs->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE |
+		XLATE(vratio, 0, NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE) |
+		NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE |
+		XLATE(hratio, 0, NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE);
+
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_START,
+		      regs->fp_horiz_regs[FP_VALID_START]);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_END,
+		      regs->fp_horiz_regs[FP_VALID_END]);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_START,
+		      regs->fp_vert_regs[FP_VALID_START]);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_END,
+		      regs->fp_vert_regs[FP_VALID_END]);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regs->fp_debug_1);
+}
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
new file mode 100644
index 0000000..18ba74f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -0,0 +1,780 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+/*
+ * NV20
+ * -----
+ * There are 3 families:
+ * NV20 is 0x10de:0x020*
+ * NV25/28 is 0x10de:0x025* / 0x10de:0x028*
+ * NV2A is 0x10de:0x02A0
+ *
+ * NV30
+ * -----
+ * There are 3 families:
+ * NV30/31 is 0x10de:0x030* / 0x10de:0x031*
+ * NV34 is 0x10de:0x032*
+ * NV35/36 is 0x10de:0x033* / 0x10de:0x034*
+ *
+ * Not seen in the wild, no dumps (probably NV35):
+ * NV37 is 0x10de:0x00fc, 0x10de:0x00fd
+ * NV38 is 0x10de:0x0333, 0x10de:0x00fe
+ *
+ */
+
+#define NV20_GRCTX_SIZE (3580*4)
+#define NV25_GRCTX_SIZE (3529*4)
+#define NV2A_GRCTX_SIZE (3500*4)
+
+#define NV30_31_GRCTX_SIZE (24392)
+#define NV34_GRCTX_SIZE    (18140)
+#define NV35_36_GRCTX_SIZE (22396)
+
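+/*
+ * For illustration only, a sketch of how the PCI device IDs listed
+ * above would map onto these context sizes (a hypothetical helper;
+ * the driver itself dispatches on dev_priv->chipset, see
+ * nv20_graph_create_context() below):
+ */
+static inline unsigned int grctx_size_for_pci_device(uint16_t device)
+{
+	switch (device & 0xfff0) {
+	case 0x0200:			/* NV20 */
+		return NV20_GRCTX_SIZE;
+	case 0x0250:			/* NV25 */
+	case 0x0280:			/* NV28 */
+		return NV25_GRCTX_SIZE;
+	case 0x02a0:			/* NV2A */
+		return NV2A_GRCTX_SIZE;
+	case 0x0300:			/* NV30 */
+	case 0x0310:			/* NV31 */
+		return NV30_31_GRCTX_SIZE;
+	case 0x0320:			/* NV34 */
+		return NV34_GRCTX_SIZE;
+	case 0x0330:			/* NV35 */
+	case 0x0340:			/* NV36 */
+		return NV35_36_GRCTX_SIZE;
+	default:
+		return 0;		/* unknown family */
+	}
+}
+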
+static void
+nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+	int i;
+
+	nv_wo32(dev, ctx, 0x033c/4, 0xffff0000);
+	nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000);
+	nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000);
+	nv_wo32(dev, ctx, 0x047c/4, 0x00000101);
+	nv_wo32(dev, ctx, 0x0490/4, 0x00000111);
+	nv_wo32(dev, ctx, 0x04a8/4, 0x44400000);
+	for (i = 0x04d4; i <= 0x04e0; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x00030303);
+	for (i = 0x04f4; i <= 0x0500; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x00080000);
+	for (i = 0x050c; i <= 0x0518; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x01012000);
+	for (i = 0x051c; i <= 0x0528; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x000105b8);
+	for (i = 0x052c; i <= 0x0538; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x00080008);
+	for (i = 0x055c; i <= 0x0598; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x07ff0000);
+	nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff);
+	nv_wo32(dev, ctx, 0x05fc/4, 0x00000001);
+	nv_wo32(dev, ctx, 0x0604/4, 0x00004000);
+	nv_wo32(dev, ctx, 0x0610/4, 0x00000001);
+	nv_wo32(dev, ctx, 0x0618/4, 0x00040000);
+	nv_wo32(dev, ctx, 0x061c/4, 0x00010000);
+	for (i = 0x1c1c; i <= 0x248c; i += 16) {
+		nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
+		nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
+		nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
+	}
+	nv_wo32(dev, ctx, 0x281c/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x2830/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x285c/4, 0x40000000);
+	nv_wo32(dev, ctx, 0x2860/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x2864/4, 0x3f000000);
+	nv_wo32(dev, ctx, 0x286c/4, 0x40000000);
+	nv_wo32(dev, ctx, 0x2870/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x2878/4, 0xbf800000);
+	nv_wo32(dev, ctx, 0x2880/4, 0xbf800000);
+	nv_wo32(dev, ctx, 0x34a4/4, 0x000fe000);
+	nv_wo32(dev, ctx, 0x3530/4, 0x000003f8);
+	nv_wo32(dev, ctx, 0x3540/4, 0x002fe000);
+	for (i = 0x355c; i <= 0x3578; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x001c527c);
+}
+
+static void
+nv25_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+	int i;
+
+	nv_wo32(dev, ctx, 0x035c/4, 0xffff0000);
+	nv_wo32(dev, ctx, 0x03c0/4, 0x0fff0000);
+	nv_wo32(dev, ctx, 0x03c4/4, 0x0fff0000);
+	nv_wo32(dev, ctx, 0x049c/4, 0x00000101);
+	nv_wo32(dev, ctx, 0x04b0/4, 0x00000111);
+	nv_wo32(dev, ctx, 0x04c8/4, 0x00000080);
+	nv_wo32(dev, ctx, 0x04cc/4, 0xffff0000);
+	nv_wo32(dev, ctx, 0x04d0/4, 0x00000001);
+	nv_wo32(dev, ctx, 0x04e4/4, 0x44400000);
+	nv_wo32(dev, ctx, 0x04fc/4, 0x4b800000);
+	for (i = 0x0510; i <= 0x051c; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x00030303);
+	for (i = 0x0530; i <= 0x053c; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x00080000);
+	for (i = 0x0548; i <= 0x0554; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x01012000);
+	for (i = 0x0558; i <= 0x0564; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x000105b8);
+	for (i = 0x0568; i <= 0x0574; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x00080008);
+	for (i = 0x0598; i <= 0x05d4; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x07ff0000);
+	nv_wo32(dev, ctx, 0x05e0/4, 0x4b7fffff);
+	nv_wo32(dev, ctx, 0x0620/4, 0x00000080);
+	nv_wo32(dev, ctx, 0x0624/4, 0x30201000);
+	nv_wo32(dev, ctx, 0x0628/4, 0x70605040);
+	nv_wo32(dev, ctx, 0x062c/4, 0xb0a09080);
+	nv_wo32(dev, ctx, 0x0630/4, 0xf0e0d0c0);
+	nv_wo32(dev, ctx, 0x0664/4, 0x00000001);
+	nv_wo32(dev, ctx, 0x066c/4, 0x00004000);
+	nv_wo32(dev, ctx, 0x0678/4, 0x00000001);
+	nv_wo32(dev, ctx, 0x0680/4, 0x00040000);
+	nv_wo32(dev, ctx, 0x0684/4, 0x00010000);
+	for (i = 0x1b04; i <= 0x2374; i += 16) {
+		nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
+		nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
+		nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
+	}
+	nv_wo32(dev, ctx, 0x2704/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x2718/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x2744/4, 0x40000000);
+	nv_wo32(dev, ctx, 0x2748/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x274c/4, 0x3f000000);
+	nv_wo32(dev, ctx, 0x2754/4, 0x40000000);
+	nv_wo32(dev, ctx, 0x2758/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x2760/4, 0xbf800000);
+	nv_wo32(dev, ctx, 0x2768/4, 0xbf800000);
+	nv_wo32(dev, ctx, 0x308c/4, 0x000fe000);
+	nv_wo32(dev, ctx, 0x3108/4, 0x000003f8);
+	nv_wo32(dev, ctx, 0x3468/4, 0x002fe000);
+	for (i = 0x3484; i <= 0x34a0; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x001c527c);
+}
+
+static void
+nv2a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+	int i;
+
+	nv_wo32(dev, ctx, 0x033c/4, 0xffff0000);
+	nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000);
+	nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000);
+	nv_wo32(dev, ctx, 0x047c/4, 0x00000101);
+	nv_wo32(dev, ctx, 0x0490/4, 0x00000111);
+	nv_wo32(dev, ctx, 0x04a8/4, 0x44400000);
+	for (i = 0x04d4; i <= 0x04e0; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x00030303);
+	for (i = 0x04f4; i <= 0x0500; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x00080000);
+	for (i = 0x050c; i <= 0x0518; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x01012000);
+	for (i = 0x051c; i <= 0x0528; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x000105b8);
+	for (i = 0x052c; i <= 0x0538; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x00080008);
+	for (i = 0x055c; i <= 0x0598; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x07ff0000);
+	nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff);
+	nv_wo32(dev, ctx, 0x05fc/4, 0x00000001);
+	nv_wo32(dev, ctx, 0x0604/4, 0x00004000);
+	nv_wo32(dev, ctx, 0x0610/4, 0x00000001);
+	nv_wo32(dev, ctx, 0x0618/4, 0x00040000);
+	nv_wo32(dev, ctx, 0x061c/4, 0x00010000);
+	for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
+		nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
+		nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
+		nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
+	}
+	nv_wo32(dev, ctx, 0x269c/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x26b0/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x26dc/4, 0x40000000);
+	nv_wo32(dev, ctx, 0x26e0/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x26e4/4, 0x3f000000);
+	nv_wo32(dev, ctx, 0x26ec/4, 0x40000000);
+	nv_wo32(dev, ctx, 0x26f0/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x26f8/4, 0xbf800000);
+	nv_wo32(dev, ctx, 0x2700/4, 0xbf800000);
+	nv_wo32(dev, ctx, 0x3024/4, 0x000fe000);
+	nv_wo32(dev, ctx, 0x30a0/4, 0x000003f8);
+	nv_wo32(dev, ctx, 0x33fc/4, 0x002fe000);
+	for (i = 0x341c; i <= 0x3438; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x001c527c);
+}
+
+static void
+nv30_31_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+	int i;
+
+	nv_wo32(dev, ctx, 0x0410/4, 0x00000101);
+	nv_wo32(dev, ctx, 0x0424/4, 0x00000111);
+	nv_wo32(dev, ctx, 0x0428/4, 0x00000060);
+	nv_wo32(dev, ctx, 0x0444/4, 0x00000080);
+	nv_wo32(dev, ctx, 0x0448/4, 0xffff0000);
+	nv_wo32(dev, ctx, 0x044c/4, 0x00000001);
+	nv_wo32(dev, ctx, 0x0460/4, 0x44400000);
+	nv_wo32(dev, ctx, 0x048c/4, 0xffff0000);
+	for (i = 0x04e0; i < 0x04e8; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x0fff0000);
+	nv_wo32(dev, ctx, 0x04ec/4, 0x00011100);
+	for (i = 0x0508; i < 0x0548; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x07ff0000);
+	nv_wo32(dev, ctx, 0x0550/4, 0x4b7fffff);
+	nv_wo32(dev, ctx, 0x058c/4, 0x00000080);
+	nv_wo32(dev, ctx, 0x0590/4, 0x30201000);
+	nv_wo32(dev, ctx, 0x0594/4, 0x70605040);
+	nv_wo32(dev, ctx, 0x0598/4, 0xb8a89888);
+	nv_wo32(dev, ctx, 0x059c/4, 0xf8e8d8c8);
+	nv_wo32(dev, ctx, 0x05b0/4, 0xb0000000);
+	for (i = 0x0600; i < 0x0640; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x00010588);
+	for (i = 0x0640; i < 0x0680; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x00030303);
+	for (i = 0x06c0; i < 0x0700; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x0008aae4);
+	for (i = 0x0700; i < 0x0740; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x01012000);
+	for (i = 0x0740; i < 0x0780; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x00080008);
+	nv_wo32(dev, ctx, 0x085c/4, 0x00040000);
+	nv_wo32(dev, ctx, 0x0860/4, 0x00010000);
+	for (i = 0x0864; i < 0x0874; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x00040004);
+	for (i = 0x1f18; i <= 0x3088 ; i += 16) {
+		nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
+		nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
+		nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
+	}
+	for (i = 0x30b8; i < 0x30c8; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x0000ffff);
+	nv_wo32(dev, ctx, 0x344c/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x3808/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x381c/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x3848/4, 0x40000000);
+	nv_wo32(dev, ctx, 0x384c/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x3850/4, 0x3f000000);
+	nv_wo32(dev, ctx, 0x3858/4, 0x40000000);
+	nv_wo32(dev, ctx, 0x385c/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x3864/4, 0xbf800000);
+	nv_wo32(dev, ctx, 0x386c/4, 0xbf800000);
+}
+
+static void
+nv34_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+	int i;
+
+	nv_wo32(dev, ctx, 0x040c/4, 0x01000101);
+	nv_wo32(dev, ctx, 0x0420/4, 0x00000111);
+	nv_wo32(dev, ctx, 0x0424/4, 0x00000060);
+	nv_wo32(dev, ctx, 0x0440/4, 0x00000080);
+	nv_wo32(dev, ctx, 0x0444/4, 0xffff0000);
+	nv_wo32(dev, ctx, 0x0448/4, 0x00000001);
+	nv_wo32(dev, ctx, 0x045c/4, 0x44400000);
+	nv_wo32(dev, ctx, 0x0480/4, 0xffff0000);
+	for (i = 0x04d4; i < 0x04dc; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x0fff0000);
+	nv_wo32(dev, ctx, 0x04e0/4, 0x00011100);
+	for (i = 0x04fc; i < 0x053c; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x07ff0000);
+	nv_wo32(dev, ctx, 0x0544/4, 0x4b7fffff);
+	nv_wo32(dev, ctx, 0x057c/4, 0x00000080);
+	nv_wo32(dev, ctx, 0x0580/4, 0x30201000);
+	nv_wo32(dev, ctx, 0x0584/4, 0x70605040);
+	nv_wo32(dev, ctx, 0x0588/4, 0xb8a89888);
+	nv_wo32(dev, ctx, 0x058c/4, 0xf8e8d8c8);
+	nv_wo32(dev, ctx, 0x05a0/4, 0xb0000000);
+	for (i = 0x05f0; i < 0x0630; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x00010588);
+	for (i = 0x0630; i < 0x0670; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x00030303);
+	for (i = 0x06b0; i < 0x06f0; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x0008aae4);
+	for (i = 0x06f0; i < 0x0730; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x01012000);
+	for (i = 0x0730; i < 0x0770; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x00080008);
+	nv_wo32(dev, ctx, 0x0850/4, 0x00040000);
+	nv_wo32(dev, ctx, 0x0854/4, 0x00010000);
+	for (i = 0x0858; i < 0x0868; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x00040004);
+	for (i = 0x15ac; i <= 0x271c ; i += 16) {
+		nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
+		nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
+		nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
+	}
+	for (i = 0x274c; i < 0x275c; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x0000ffff);
+	nv_wo32(dev, ctx, 0x2ae0/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x2e9c/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x2eb0/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x2edc/4, 0x40000000);
+	nv_wo32(dev, ctx, 0x2ee0/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x2ee4/4, 0x3f000000);
+	nv_wo32(dev, ctx, 0x2eec/4, 0x40000000);
+	nv_wo32(dev, ctx, 0x2ef0/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x2ef8/4, 0xbf800000);
+	nv_wo32(dev, ctx, 0x2f00/4, 0xbf800000);
+}
+
+static void
+nv35_36_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+	int i;
+
+	nv_wo32(dev, ctx, 0x040c/4, 0x00000101);
+	nv_wo32(dev, ctx, 0x0420/4, 0x00000111);
+	nv_wo32(dev, ctx, 0x0424/4, 0x00000060);
+	nv_wo32(dev, ctx, 0x0440/4, 0x00000080);
+	nv_wo32(dev, ctx, 0x0444/4, 0xffff0000);
+	nv_wo32(dev, ctx, 0x0448/4, 0x00000001);
+	nv_wo32(dev, ctx, 0x045c/4, 0x44400000);
+	nv_wo32(dev, ctx, 0x0488/4, 0xffff0000);
+	for (i = 0x04dc; i < 0x04e4; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x0fff0000);
+	nv_wo32(dev, ctx, 0x04e8/4, 0x00011100);
+	for (i = 0x0504; i < 0x0544; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x07ff0000);
+	nv_wo32(dev, ctx, 0x054c/4, 0x4b7fffff);
+	nv_wo32(dev, ctx, 0x0588/4, 0x00000080);
+	nv_wo32(dev, ctx, 0x058c/4, 0x30201000);
+	nv_wo32(dev, ctx, 0x0590/4, 0x70605040);
+	nv_wo32(dev, ctx, 0x0594/4, 0xb8a89888);
+	nv_wo32(dev, ctx, 0x0598/4, 0xf8e8d8c8);
+	nv_wo32(dev, ctx, 0x05ac/4, 0xb0000000);
+	for (i = 0x0604; i < 0x0644; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x00010588);
+	for (i = 0x0644; i < 0x0684; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x00030303);
+	for (i = 0x06c4; i < 0x0704; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x0008aae4);
+	for (i = 0x0704; i < 0x0744; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x01012000);
+	for (i = 0x0744; i < 0x0784; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x00080008);
+	nv_wo32(dev, ctx, 0x0860/4, 0x00040000);
+	nv_wo32(dev, ctx, 0x0864/4, 0x00010000);
+	for (i = 0x0868; i < 0x0878; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x00040004);
+	for (i = 0x1f1c; i <= 0x308c ; i += 16) {
+		nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
+		nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
+		nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
+	}
+	for (i = 0x30bc; i < 0x30cc; i += 4)
+		nv_wo32(dev, ctx, i/4, 0x0000ffff);
+	nv_wo32(dev, ctx, 0x3450/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x380c/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x3820/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x384c/4, 0x40000000);
+	nv_wo32(dev, ctx, 0x3850/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x3854/4, 0x3f000000);
+	nv_wo32(dev, ctx, 0x385c/4, 0x40000000);
+	nv_wo32(dev, ctx, 0x3860/4, 0x3f800000);
+	nv_wo32(dev, ctx, 0x3868/4, 0xbf800000);
+	nv_wo32(dev, ctx, 0x3870/4, 0xbf800000);
+}
+
+int
+nv20_graph_create_context(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
+	unsigned int ctx_size;
+	unsigned int idoffs = 0x28/4;
+	int ret;
+
+	switch (dev_priv->chipset) {
+	case 0x20:
+		ctx_size = NV20_GRCTX_SIZE;
+		ctx_init = nv20_graph_context_init;
+		idoffs = 0;
+		break;
+	case 0x25:
+	case 0x28:
+		ctx_size = NV25_GRCTX_SIZE;
+		ctx_init = nv25_graph_context_init;
+		break;
+	case 0x2a:
+		ctx_size = NV2A_GRCTX_SIZE;
+		ctx_init = nv2a_graph_context_init;
+		idoffs = 0;
+		break;
+	case 0x30:
+	case 0x31:
+		ctx_size = NV30_31_GRCTX_SIZE;
+		ctx_init = nv30_31_graph_context_init;
+		break;
+	case 0x34:
+		ctx_size = NV34_GRCTX_SIZE;
+		ctx_init = nv34_graph_context_init;
+		break;
+	case 0x35:
+	case 0x36:
+		ctx_size = NV35_36_GRCTX_SIZE;
+		ctx_init = nv35_36_graph_context_init;
+		break;
+	default:
+		NV_ERROR(dev, "Please contact the devs if you want your NV%x"
+			      " card to work\n", dev_priv->chipset);
+		return -ENOSYS;
+	}
+
+	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16,
+					  NVOBJ_FLAG_ZERO_ALLOC,
+					  &chan->ramin_grctx);
+	if (ret)
+		return ret;
+
+	/* Initialise default context values */
+	dev_priv->engine.instmem.prepare_access(dev, true);
+	ctx_init(dev, chan->ramin_grctx->gpuobj);
+
+	/* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
+	nv_wo32(dev, chan->ramin_grctx->gpuobj, idoffs,
+					(chan->id << 24) | 0x1); /* CTX_USER */
+
+	nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id,
+			chan->ramin_grctx->instance >> 4);
+
+	dev_priv->engine.instmem.finish_access(dev);
+	return 0;
+}
+
+void
+nv20_graph_destroy_context(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (chan->ramin_grctx)
+		nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
+
+	dev_priv->engine.instmem.prepare_access(dev, true);
+	nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id, 0);
+	dev_priv->engine.instmem.finish_access(dev);
+}
+
+int
+nv20_graph_load_context(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	uint32_t inst;
+
+	if (!chan->ramin_grctx)
+		return -EINVAL;
+	inst = chan->ramin_grctx->instance >> 4;
+
+	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
+	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
+		     NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD);
+	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+
+	nouveau_wait_for_idle(dev);
+	return 0;
+}
+
+int
+nv20_graph_unload_context(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nouveau_channel *chan;
+	uint32_t inst, tmp;
+
+	chan = pgraph->channel(dev);
+	if (!chan)
+		return 0;
+	inst = chan->ramin_grctx->instance >> 4;
+
+	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
+	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
+		     NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);
+
+	nouveau_wait_for_idle(dev);
+
+	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
+	tmp  = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
+	tmp |= (pfifo->channels - 1) << 24;
+	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
+	return 0;
+}
+
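+/* Zero out a block of PGRAPH RDI space; the RDI base index and the
+ * number of words written differ between NV20 and the later chips. */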
+static void
+nv20_graph_rdi(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int i, writecount = 32;
+	uint32_t rdi_index = 0x2c80000;
+
+	if (dev_priv->chipset == 0x20) {
+		rdi_index = 0x3d0000;
+		writecount = 15;
+	}
+
+	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, rdi_index);
+	for (i = 0; i < writecount; i++)
+		nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0);
+
+	nouveau_wait_for_idle(dev);
+}
+
+int
+nv20_graph_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv =
+		(struct drm_nouveau_private *)dev->dev_private;
+	uint32_t tmp, vramsz;
+	int ret, i;
+
+	nv_wr32(dev, NV03_PMC_ENABLE,
+		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
+	nv_wr32(dev, NV03_PMC_ENABLE,
+		nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PGRAPH);
+
+	if (!dev_priv->ctx_table) {
+		/* Create Context Pointer Table */
+		dev_priv->ctx_table_size = 32 * 4;
+		ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
+						  dev_priv->ctx_table_size, 16,
+						  NVOBJ_FLAG_ZERO_ALLOC,
+						  &dev_priv->ctx_table);
+		if (ret)
+			return ret;
+	}
+
+	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
+		 dev_priv->ctx_table->instance >> 4);
+
+	nv20_graph_rdi(dev);
+
+	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
+	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
+	nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
+	nv_wr32(dev, 0x40009C           , 0x00000040);
+
+	if (dev_priv->chipset >= 0x25) {
+		nv_wr32(dev, 0x400890, 0x00080000);
+		nv_wr32(dev, 0x400610, 0x304B1FB6);
+		nv_wr32(dev, 0x400B80, 0x18B82880);
+		nv_wr32(dev, 0x400B84, 0x44000000);
+		nv_wr32(dev, 0x400098, 0x40000080);
+		nv_wr32(dev, 0x400B88, 0x000000ff);
+	} else {
+		nv_wr32(dev, 0x400880, 0x00080000); /* 0x0008c7df */
+		nv_wr32(dev, 0x400094, 0x00000005);
+		nv_wr32(dev, 0x400B80, 0x45CAA208); /* 0x45eae20e */
+		nv_wr32(dev, 0x400B84, 0x24000000);
+		nv_wr32(dev, 0x400098, 0x00000040);
+		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
+		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
+		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
+		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
+	}
+
+	/* copy tile info from PFB */
+	for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
+		nv_wr32(dev, 0x00400904 + i * 0x10,
+					nv_rd32(dev, NV10_PFB_TLIMIT(i)));
+			/* which is NV40_PGRAPH_TLIMIT0(i) ?? */
+		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + i * 4);
+		nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
+					nv_rd32(dev, NV10_PFB_TLIMIT(i)));
+		nv_wr32(dev, 0x00400908 + i * 0x10,
+					nv_rd32(dev, NV10_PFB_TSIZE(i)));
+			/* which is NV40_PGRAPH_TSIZE0(i) ?? */
+		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + i * 4);
+		nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
+					nv_rd32(dev, NV10_PFB_TSIZE(i)));
+		nv_wr32(dev, 0x00400900 + i * 0x10,
+					nv_rd32(dev, NV10_PFB_TILE(i)));
+			/* which is NV40_PGRAPH_TILE0(i) ?? */
+		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + i * 4);
+		nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
+					nv_rd32(dev, NV10_PFB_TILE(i)));
+	}
+	for (i = 0; i < 8; i++) {
+		nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
+		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4);
+		nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
+					nv_rd32(dev, 0x100300 + i * 4));
+	}
+	nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324));
+	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
+	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324));
+
+	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
+
+	tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) & 0x0007ff00;
+	nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
+	tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) | 0x00020100;
+	nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
+
+	/* begin RAM config */
+	vramsz = drm_get_resource_len(dev, 0) - 1;
+	nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
+	nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
+	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
+	nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG0));
+	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
+	nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG1));
+	nv_wr32(dev, 0x400820, 0);
+	nv_wr32(dev, 0x400824, 0);
+	nv_wr32(dev, 0x400864, vramsz - 1);
+	nv_wr32(dev, 0x400868, vramsz - 1);
+
+	/* interesting.. the below overwrites some of the tile setup above.. */
+	nv_wr32(dev, 0x400B20, 0x00000000);
+	nv_wr32(dev, 0x400B04, 0xFFFFFFFF);
+
+	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
+	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
+	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
+	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
+
+	return 0;
+}
+
+void
+nv20_graph_takedown(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table);
+}
+
+int
+nv30_graph_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret, i;
+
+	nv_wr32(dev, NV03_PMC_ENABLE,
+		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
+	nv_wr32(dev, NV03_PMC_ENABLE,
+		nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PGRAPH);
+
+	if (!dev_priv->ctx_table) {
+		/* Create Context Pointer Table */
+		dev_priv->ctx_table_size = 32 * 4;
+		ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
+						  dev_priv->ctx_table_size, 16,
+						  NVOBJ_FLAG_ZERO_ALLOC,
+						  &dev_priv->ctx_table);
+		if (ret)
+			return ret;
+	}
+
+	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
+			dev_priv->ctx_table->instance >> 4);
+
+	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
+	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
+	nv_wr32(dev, 0x400890, 0x01b463ff);
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
+	nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
+	nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
+	nv_wr32(dev, 0x400B80, 0x1003d888);
+	nv_wr32(dev, 0x400B84, 0x0c000000);
+	nv_wr32(dev, 0x400098, 0x00000000);
+	nv_wr32(dev, 0x40009C, 0x0005ad00);
+	nv_wr32(dev, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
+	nv_wr32(dev, 0x4000a0, 0x00000000);
+	nv_wr32(dev, 0x4000a4, 0x00000008);
+	nv_wr32(dev, 0x4008a8, 0xb784a400);
+	nv_wr32(dev, 0x400ba0, 0x002f8685);
+	nv_wr32(dev, 0x400ba4, 0x00231f3f);
+	nv_wr32(dev, 0x4008a4, 0x40000020);
+
+	if (dev_priv->chipset == 0x34) {
+		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
+		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00200201);
+		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
+		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000008);
+		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
+		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000032);
+		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
+		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000002);
+	}
+
+	nv_wr32(dev, 0x4000c0, 0x00000016);
+
+	/* copy tile info from PFB */
+	for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
+		nv_wr32(dev, 0x00400904 + i * 0x10,
+					nv_rd32(dev, NV10_PFB_TLIMIT(i)));
+			/* which is NV40_PGRAPH_TLIMIT0(i) ?? */
+		nv_wr32(dev, 0x00400908 + i * 0x10,
+					nv_rd32(dev, NV10_PFB_TSIZE(i)));
+			/* which is NV40_PGRAPH_TSIZE0(i) ?? */
+		nv_wr32(dev, 0x00400900 + i * 0x10,
+					nv_rd32(dev, NV10_PFB_TILE(i)));
+			/* which is NV40_PGRAPH_TILE0(i) ?? */
+	}
+
+	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
+	nv_wr32(dev, 0x0040075c             , 0x00000001);
+
+	/* begin RAM config */
+	/* vramsz = drm_get_resource_len(dev, 0) - 1; */
+	nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
+	nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
+	if (dev_priv->chipset != 0x34) {
+		nv_wr32(dev, 0x400750, 0x00EA0000);
+		nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG0));
+		nv_wr32(dev, 0x400750, 0x00EA0004);
+		nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG1));
+	}
+
+	return 0;
+}
+
+struct nouveau_pgraph_object_class nv20_graph_grclass[] = {
+	{ 0x0030, false, NULL }, /* null */
+	{ 0x0039, false, NULL }, /* m2mf */
+	{ 0x004a, false, NULL }, /* gdirect */
+	{ 0x009f, false, NULL }, /* imageblit (nv12) */
+	{ 0x008a, false, NULL }, /* ifc */
+	{ 0x0089, false, NULL }, /* sifm */
+	{ 0x0062, false, NULL }, /* surf2d */
+	{ 0x0043, false, NULL }, /* rop */
+	{ 0x0012, false, NULL }, /* beta1 */
+	{ 0x0072, false, NULL }, /* beta4 */
+	{ 0x0019, false, NULL }, /* cliprect */
+	{ 0x0044, false, NULL }, /* pattern */
+	{ 0x009e, false, NULL }, /* swzsurf */
+	{ 0x0096, false, NULL }, /* celcius */
+	{ 0x0097, false, NULL }, /* kelvin (nv20) */
+	{ 0x0597, false, NULL }, /* kelvin (nv25) */
+	{}
+};
+
+struct nouveau_pgraph_object_class nv30_graph_grclass[] = {
+	{ 0x0030, false, NULL }, /* null */
+	{ 0x0039, false, NULL }, /* m2mf */
+	{ 0x004a, false, NULL }, /* gdirect */
+	{ 0x009f, false, NULL }, /* imageblit (nv12) */
+	{ 0x008a, false, NULL }, /* ifc */
+	{ 0x038a, false, NULL }, /* ifc (nv30) */
+	{ 0x0089, false, NULL }, /* sifm */
+	{ 0x0389, false, NULL }, /* sifm (nv30) */
+	{ 0x0062, false, NULL }, /* surf2d */
+	{ 0x0362, false, NULL }, /* surf2d (nv30) */
+	{ 0x0043, false, NULL }, /* rop */
+	{ 0x0012, false, NULL }, /* beta1 */
+	{ 0x0072, false, NULL }, /* beta4 */
+	{ 0x0019, false, NULL }, /* cliprect */
+	{ 0x0044, false, NULL }, /* pattern */
+	{ 0x039e, false, NULL }, /* swzsurf */
+	{ 0x0397, false, NULL }, /* rankine (nv30) */
+	{ 0x0497, false, NULL }, /* rankine (nv35) */
+	{ 0x0697, false, NULL }, /* rankine (nv34) */
+	{}
+};
+
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
new file mode 100644
index 0000000..ca1d271
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -0,0 +1,62 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv40_fb_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t fb_bar_size, tmp;
+	int num_tiles;
+	int i;
+
+	/* This is strictly a NV4x register (don't know about NV5x).
+	 * The blob sets these to all kinds of values, and they mess up
+	 * our setup; I got the value 0x52802 instead.  For some cards
+	 * the blob even sets it back to 0x1.  Note: the blob doesn't
+	 * read this value, so I'm pretty sure this is safe for all
+	 * cards.  Any idea what this is?
+	 */
+	nv_wr32(dev, NV40_PFB_UNK_800, 0x1);
+
+	switch (dev_priv->chipset) {
+	case 0x40:
+	case 0x45:
+		tmp = nv_rd32(dev, NV10_PFB_CLOSE_PAGE2);
+		nv_wr32(dev, NV10_PFB_CLOSE_PAGE2, tmp & ~(1 << 15));
+		num_tiles = NV10_PFB_TILE__SIZE;
+		break;
+	case 0x46: /* G72 */
+	case 0x47: /* G70 */
+	case 0x49: /* G71 */
+	case 0x4b: /* G73 */
+	case 0x4c: /* C51 (G7X version) */
+		num_tiles = NV40_PFB_TILE__SIZE_1;
+		break;
+	default:
+		num_tiles = NV40_PFB_TILE__SIZE_0;
+		break;
+	}
+
+	fb_bar_size = drm_get_resource_len(dev, 0) - 1;
+	switch (dev_priv->chipset) {
+	case 0x40:
+		for (i = 0; i < num_tiles; i++) {
+			nv_wr32(dev, NV10_PFB_TILE(i), 0);
+			nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size);
+		}
+		break;
+	default:
+		for (i = 0; i < num_tiles; i++) {
+			nv_wr32(dev, NV40_PFB_TILE(i), 0);
+			nv_wr32(dev, NV40_PFB_TLIMIT(i), fb_bar_size);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+void
+nv40_fb_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
new file mode 100644
index 0000000..b4f19cc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -0,0 +1,314 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+#define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV40_RAMFC__SIZE))
+#define NV40_RAMFC__SIZE 128
+
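+/*
+ * RAMFC layout, as read back and written by nv40_fifo_do_load_context()
+ * and nv40_fifo_unload_context() below (byte offsets, one 32-bit word
+ * each; names follow the CACHE1 registers they shadow):
+ *
+ *   0x00 DMA_PUT            0x04 DMA_GET
+ *   0x08 REF_CNT            0x0c DMA_INSTANCE
+ *   0x10 DMA_DCOUNT         0x14 DMA_STATE
+ *   0x18 DMA_FETCH (bits 29:28 shadow 0x2058)
+ *   0x1c ENGINE             0x20 PULL1
+ *   0x24 ACQUIRE_VALUE      0x28 ACQUIRE_TIMESTAMP
+ *   0x2c ACQUIRE_TIMEOUT    0x30 SEMAPHORE
+ *   0x34 DMA_SUBROUTINE     0x38 GRCTX_INSTANCE
+ *   0x3c DMA_TIMESLICE (low 17 bits)
+ *   0x40 0x32e4             0x44 0x32e8
+ *   0x48 unused (CACHE1 PUT/GET? see the #if 0 block)
+ *   0x4c 0x2088             0x50 0x3300
+ */
+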
+int
+nv40_fifo_create_context(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t fc = NV40_RAMFC(chan->id);
+	int ret;
+
+	ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
+				      NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
+				      NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc);
+	if (ret)
+		return ret;
+
+	dev_priv->engine.instmem.prepare_access(dev, true);
+	nv_wi32(dev, fc +  0, chan->pushbuf_base);
+	nv_wi32(dev, fc +  4, chan->pushbuf_base);
+	nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
+	nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+			      NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+			      NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
+#ifdef __BIG_ENDIAN
+			      NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+			      0x30000000 /* no idea.. */);
+	nv_wi32(dev, fc + 56, chan->ramin_grctx->instance >> 4);
+	nv_wi32(dev, fc + 60, 0x0001FFFF);
+	dev_priv->engine.instmem.finish_access(dev);
+
+	/* enable the fifo dma operation */
+	nv_wr32(dev, NV04_PFIFO_MODE,
+		nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+	return 0;
+}
+
+void
+nv40_fifo_destroy_context(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+
+	nv_wr32(dev, NV04_PFIFO_MODE,
+		nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
+
+	if (chan->ramfc)
+		nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+}
+
+static void
+nv40_fifo_do_load_context(struct drm_device *dev, int chid)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t fc = NV40_RAMFC(chid), tmp, tmp2;
+
+	dev_priv->engine.instmem.prepare_access(dev, false);
+
+	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
+	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
+	nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
+	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, nv_ri32(dev, fc + 12));
+	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, nv_ri32(dev, fc + 16));
+	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 20));
+
+	/* No idea what 0x2058 is.. */
+	tmp   = nv_ri32(dev, fc + 24);
+	tmp2  = nv_rd32(dev, 0x2058) & 0xFFF;
+	tmp2 |= (tmp & 0x30000000);
+	nv_wr32(dev, 0x2058, tmp2);
+	tmp  &= ~0x30000000;
+	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, tmp);
+
+	nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 28));
+	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 32));
+	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 36));
+	tmp = nv_ri32(dev, fc + 40);
+	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
+	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 44));
+	nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 48));
+	nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 52));
+	nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, nv_ri32(dev, fc + 56));
+
+	/* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */
+	tmp  = nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF;
+	tmp |= nv_ri32(dev, fc + 60) & 0x1FFFF;
+	nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, tmp);
+
+	nv_wr32(dev, 0x32e4, nv_ri32(dev, fc + 64));
+	/* NVIDIA does this next line twice... */
+	nv_wr32(dev, 0x32e8, nv_ri32(dev, fc + 68));
+	nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76));
+	nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80));
+
+	dev_priv->engine.instmem.finish_access(dev);
+
+	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
+	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
+}
+
+int
+nv40_fifo_load_context(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	uint32_t tmp;
+
+	nv40_fifo_do_load_context(dev, chan->id);
+
+	/* Set channel active, and in DMA mode */
+	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
+		     NV40_PFIFO_CACHE1_PUSH1_DMA | chan->id);
+	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
+
+	/* Reset DMA_CTL_AT_INFO to INVALID */
+	tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
+	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
+
+	return 0;
+}
+
+int
+nv40_fifo_unload_context(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	uint32_t fc, tmp;
+	int chid;
+
+	chid = pfifo->channel_id(dev);
+	if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
+		return 0;
+	fc = NV40_RAMFC(chid);
+
+	dev_priv->engine.instmem.prepare_access(dev, true);
+	nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
+	nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
+	nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
+	nv_wi32(dev, fc + 12, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE));
+	nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT));
+	nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
+	tmp  = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH);
+	tmp |= nv_rd32(dev, 0x2058) & 0x30000000;
+	nv_wi32(dev, fc + 24, tmp);
+	nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
+	nv_wi32(dev, fc + 32, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
+	nv_wi32(dev, fc + 36, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
+	tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
+	nv_wi32(dev, fc + 40, tmp);
+	nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
+	nv_wi32(dev, fc + 48, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
+	/* NVIDIA reads 0x3228 first, then writes DMA_GET here.. maybe
+	 * something more involved happens depending on the value of 0x3228?
+	 */
+	nv_wi32(dev, fc + 52, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
+	nv_wi32(dev, fc + 56, nv_rd32(dev, NV40_PFIFO_GRCTX_INSTANCE));
+	nv_wi32(dev, fc + 60, nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & 0x1ffff);
+	/* No idea what the below is for exactly; ripped from an mmio trace */
+	nv_wi32(dev, fc + 64, nv_rd32(dev, NV40_PFIFO_UNK32E4));
+	/* NVIDIA does this next line twice.. bug? */
+	nv_wi32(dev, fc + 68, nv_rd32(dev, 0x32e8));
+	nv_wi32(dev, fc + 76, nv_rd32(dev, 0x2088));
+	nv_wi32(dev, fc + 80, nv_rd32(dev, 0x3300));
+#if 0 /* no real idea which is PUT/GET in UNK_48.. */
+	tmp  = nv_rd32(dev, NV04_PFIFO_CACHE1_GET);
+	tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16);
+	nv_wi32(dev, fc + 72, tmp);
+#endif
+	dev_priv->engine.instmem.finish_access(dev);
+
+	nv40_fifo_do_load_context(dev, pfifo->channels - 1);
+	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
+		     NV40_PFIFO_CACHE1_PUSH1_DMA | (pfifo->channels - 1));
+	return 0;
+}
+
+static void
+nv40_fifo_init_reset(struct drm_device *dev)
+{
+	int i;
+
+	nv_wr32(dev, NV03_PMC_ENABLE,
+		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
+	nv_wr32(dev, NV03_PMC_ENABLE,
+		nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PFIFO);
+
+	nv_wr32(dev, 0x003224, 0x000f0078);
+	nv_wr32(dev, 0x003210, 0x00000000);
+	nv_wr32(dev, 0x003270, 0x00000000);
+	nv_wr32(dev, 0x003240, 0x00000000);
+	nv_wr32(dev, 0x003244, 0x00000000);
+	nv_wr32(dev, 0x003258, 0x00000000);
+	nv_wr32(dev, 0x002504, 0x00000000);
+	for (i = 0; i < 16; i++)
+		nv_wr32(dev, 0x002510 + (i * 4), 0x00000000);
+	nv_wr32(dev, 0x00250c, 0x0000ffff);
+	nv_wr32(dev, 0x002048, 0x00000000);
+	nv_wr32(dev, 0x003228, 0x00000000);
+	nv_wr32(dev, 0x0032e8, 0x00000000);
+	nv_wr32(dev, 0x002410, 0x00000000);
+	nv_wr32(dev, 0x002420, 0x00000000);
+	nv_wr32(dev, 0x002058, 0x00000001);
+	nv_wr32(dev, 0x00221c, 0x00000000);
+	/* something with 0x2084, read/modify/write, no change */
+	nv_wr32(dev, 0x002040, 0x000000ff);
+	nv_wr32(dev, 0x002500, 0x00000000);
+	nv_wr32(dev, 0x003200, 0x00000000);
+
+	nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff);
+}
+
+static void
+nv40_fifo_init_ramxx(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+				       ((dev_priv->ramht_bits - 9) << 16) |
+				       (dev_priv->ramht_offset >> 8));
+	nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
+
+	switch (dev_priv->chipset) {
+	case 0x47:
+	case 0x49:
+	case 0x4b:
+		nv_wr32(dev, 0x2230, 1);
+		break;
+	default:
+		break;
+	}
+
+	switch (dev_priv->chipset) {
+	case 0x40:
+	case 0x41:
+	case 0x42:
+	case 0x43:
+	case 0x45:
+	case 0x47:
+	case 0x48:
+	case 0x49:
+	case 0x4b:
+		nv_wr32(dev, NV40_PFIFO_RAMFC, 0x30002);
+		break;
+	default:
+		nv_wr32(dev, 0x2230, 0);
+		nv_wr32(dev, NV40_PFIFO_RAMFC,
+			((nouveau_mem_fb_amount(dev) - 512 * 1024 +
+			  dev_priv->ramfc_offset) >> 16) | (3 << 16));
+		break;
+	}
+}
+
+static void
+nv40_fifo_init_intr(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x002100, 0xffffffff);
+	nv_wr32(dev, 0x002140, 0xffffffff);
+}
+
+int
+nv40_fifo_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	int i;
+
+	nv40_fifo_init_reset(dev);
+	nv40_fifo_init_ramxx(dev);
+
+	nv40_fifo_do_load_context(dev, pfifo->channels - 1);
+	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
+
+	nv40_fifo_init_intr(dev);
+	pfifo->enable(dev);
+	pfifo->reassign(dev, true);
+
+	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+		if (dev_priv->fifos[i]) {
+			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
+			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
new file mode 100644
index 0000000..d3e0a2a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -0,0 +1,560 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+MODULE_FIRMWARE("nouveau/nv40.ctxprog");
+MODULE_FIRMWARE("nouveau/nv40.ctxvals");
+MODULE_FIRMWARE("nouveau/nv41.ctxprog");
+MODULE_FIRMWARE("nouveau/nv41.ctxvals");
+MODULE_FIRMWARE("nouveau/nv42.ctxprog");
+MODULE_FIRMWARE("nouveau/nv42.ctxvals");
+MODULE_FIRMWARE("nouveau/nv43.ctxprog");
+MODULE_FIRMWARE("nouveau/nv43.ctxvals");
+MODULE_FIRMWARE("nouveau/nv44.ctxprog");
+MODULE_FIRMWARE("nouveau/nv44.ctxvals");
+MODULE_FIRMWARE("nouveau/nv46.ctxprog");
+MODULE_FIRMWARE("nouveau/nv46.ctxvals");
+MODULE_FIRMWARE("nouveau/nv47.ctxprog");
+MODULE_FIRMWARE("nouveau/nv47.ctxvals");
+MODULE_FIRMWARE("nouveau/nv49.ctxprog");
+MODULE_FIRMWARE("nouveau/nv49.ctxvals");
+MODULE_FIRMWARE("nouveau/nv4a.ctxprog");
+MODULE_FIRMWARE("nouveau/nv4a.ctxvals");
+MODULE_FIRMWARE("nouveau/nv4b.ctxprog");
+MODULE_FIRMWARE("nouveau/nv4b.ctxvals");
+MODULE_FIRMWARE("nouveau/nv4c.ctxprog");
+MODULE_FIRMWARE("nouveau/nv4c.ctxvals");
+MODULE_FIRMWARE("nouveau/nv4e.ctxprog");
+MODULE_FIRMWARE("nouveau/nv4e.ctxvals");
+
+struct nouveau_channel *
+nv40_graph_channel(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t inst;
+	int i;
+
+	inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
+	if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
+		return NULL;
+	inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
+
+	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+		struct nouveau_channel *chan = dev_priv->fifos[i];
+
+		if (chan && chan->ramin_grctx &&
+		    chan->ramin_grctx->instance == inst)
+			return chan;
+	}
+
+	return NULL;
+}
+
+int
+nv40_graph_create_context(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *ctx;
+	int ret;
+
+	/* Allocate a 175KiB block of PRAMIN to store the context.  This
+	 * is massive overkill for a lot of chipsets, but it should be safe
+	 * until we're able to implement this properly (will happen at more
+	 * or less the same time we're able to write our own context programs).
+	 */
+	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 175*1024, 16,
+					  NVOBJ_FLAG_ZERO_ALLOC,
+					  &chan->ramin_grctx);
+	if (ret)
+		return ret;
+	ctx = chan->ramin_grctx->gpuobj;
+
+	/* Initialise default context values */
+	dev_priv->engine.instmem.prepare_access(dev, true);
+	nv40_grctx_vals_load(dev, ctx);
+	nv_wo32(dev, ctx, 0, ctx->im_pramin->start);
+	dev_priv->engine.instmem.finish_access(dev);
+
+	return 0;
+}
+
+void
+nv40_graph_destroy_context(struct nouveau_channel *chan)
+{
+	nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx);
+}
+
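+/* Point the PGRAPH context switcher at `inst` and ask it to save or
+ * load the context, then poll 0x40030C until the transfer completes
+ * (or give up and dump the ucode state). */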
+static int
+nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
+{
+	uint32_t old_cp, tv = 1000, tmp;
+	int i;
+
+	old_cp = nv_rd32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER);
+	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
+
+	tmp  = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0310);
+	tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
+		      NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;
+	nv_wr32(dev, NV40_PGRAPH_CTXCTL_0310, tmp);
+
+	tmp  = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0304);
+	tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;
+	nv_wr32(dev, NV40_PGRAPH_CTXCTL_0304, tmp);
+
+	nouveau_wait_for_idle(dev);
+
+	for (i = 0; i < tv; i++) {
+		if (nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C) == 0)
+			break;
+	}
+
+	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
+
+	if (i == tv) {
+		uint32_t ucstat = nv_rd32(dev, NV40_PGRAPH_CTXCTL_UCODE_STAT);
+		NV_ERROR(dev, "Failed: Instance=0x%08x Save=%d\n", inst, save);
+		NV_ERROR(dev, "IP: 0x%02x, Opcode: 0x%08x\n",
+			 ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT,
+			 ucstat  & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK);
+		NV_ERROR(dev, "0x40030C = 0x%08x\n",
+			 nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+/* Restore the context for a specific channel into PGRAPH */
+int
+nv40_graph_load_context(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	uint32_t inst;
+	int ret;
+
+	if (!chan->ramin_grctx)
+		return -EINVAL;
+	inst = chan->ramin_grctx->instance >> 4;
+
+	ret = nv40_graph_transfer_context(dev, inst, 0);
+	if (ret)
+		return ret;
+
+	/* 0x40032C, no idea of its exact function.  Could simply be a
+	 * record of the currently active PGRAPH context.  It's currently
+	 * unknown what bit 24 does.  The nv ddx has it set, so we will
+	 * set it here too.
+	 */
+	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
+	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR,
+		 (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) |
+		  NV40_PGRAPH_CTXCTL_CUR_LOADED);
+	/* 0x32E0 records the instance address of the active FIFO's PGRAPH
+	 * context.  If at any time this doesn't match 0x40032C, you will
+	 * receive PGRAPH_INTR_CONTEXT_SWITCH.
+	 */
+	nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, inst);
+	return 0;
+}
+
+int
+nv40_graph_unload_context(struct drm_device *dev)
+{
+	uint32_t inst;
+	int ret;
+
+	inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
+	if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
+		return 0;
+	inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;
+
+	ret = nv40_graph_transfer_context(dev, inst, 1);
+
+	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
+	return ret;
+}
+
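+/* Layout of the external "nouveau/nvXX.ctxprog" and "nouveau/nvXX.ctxvals"
+ * firmware files: a packed header (4-byte signature "NVCP"/"NVCV", version
+ * byte, entry count) followed by the payload, hence the (fw->size - 7) / 4
+ * and (fw->size - 9) / 8 sanity checks in nv40_grctx_init().
+ */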
+struct nouveau_ctxprog {
+	uint32_t signature;
+	uint8_t  version;
+	uint16_t length;
+	uint32_t data[];
+} __attribute__ ((packed));
+
+struct nouveau_ctxvals {
+	uint32_t signature;
+	uint8_t  version;
+	uint32_t length;
+	struct {
+		uint32_t offset;
+		uint32_t value;
+	} data[];
+} __attribute__ ((packed));
+
+int
+nv40_grctx_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	const int chipset = dev_priv->chipset;
+	const struct firmware *fw;
+	const struct nouveau_ctxprog *cp;
+	const struct nouveau_ctxvals *cv;
+	char name[32];
+	int ret, i;
+
+	pgraph->accel_blocked = true;
+
+	if (!pgraph->ctxprog) {
+		sprintf(name, "nouveau/nv%02x.ctxprog", chipset);
+		ret = request_firmware(&fw, name, &dev->pdev->dev);
+		if (ret) {
+			NV_ERROR(dev, "No ctxprog for NV%02x\n", chipset);
+			return ret;
+		}
+
+		pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL);
+		if (!pgraph->ctxprog) {
+			NV_ERROR(dev, "OOM copying ctxprog\n");
+			release_firmware(fw);
+			return -ENOMEM;
+		}
+		memcpy(pgraph->ctxprog, fw->data, fw->size);
+
+		cp = pgraph->ctxprog;
+		if (cp->signature != 0x5043564e || cp->version != 0 ||
+		    cp->length != ((fw->size - 7) / 4)) {
+			NV_ERROR(dev, "ctxprog invalid\n");
+			release_firmware(fw);
+			nv40_grctx_fini(dev);
+			return -EINVAL;
+		}
+		release_firmware(fw);
+	}
+
+	if (!pgraph->ctxvals) {
+		sprintf(name, "nouveau/nv%02x.ctxvals", chipset);
+		ret = request_firmware(&fw, name, &dev->pdev->dev);
+		if (ret) {
+			NV_ERROR(dev, "No ctxvals for NV%02x\n", chipset);
+			nv40_grctx_fini(dev);
+			return ret;
+		}
+
+		pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
+		if (!pgraph->ctxvals) {
+			NV_ERROR(dev, "OOM copying ctxvals\n");
+			release_firmware(fw);
+			nv40_grctx_fini(dev);
+			return -ENOMEM;
+		}
+		memcpy(pgraph->ctxvals, fw->data, fw->size);
+
+		cv = (void *)pgraph->ctxvals;
+		if (cv->signature != 0x5643564e || cv->version != 0 ||
+		    cv->length != ((fw->size - 9) / 8)) {
+			NV_ERROR(dev, "ctxvals invalid\n");
+			release_firmware(fw);
+			nv40_grctx_fini(dev);
+			return -EINVAL;
+		}
+		release_firmware(fw);
+	}
+
+	cp = pgraph->ctxprog;
+
+	nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+	for (i = 0; i < cp->length; i++)
+		nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp->data[i]);
+
+	pgraph->accel_blocked = false;
+	return 0;
+}
+
+void
+nv40_grctx_fini(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+
+	if (pgraph->ctxprog) {
+		kfree(pgraph->ctxprog);
+		pgraph->ctxprog = NULL;
+	}
+
+	if (pgraph->ctxvals) {
+		kfree(pgraph->ctxvals);
+		pgraph->ctxvals = NULL;
+	}
+}
+
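+/* Write the default context values from the ctxvals firmware into a
+ * freshly allocated channel context.
+ */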
+void
+nv40_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nouveau_ctxvals *cv = pgraph->ctxvals;
+	int i;
+
+	if (!cv)
+		return;
+
+	for (i = 0; i < cv->length; i++)
+		nv_wo32(dev, ctx, cv->data[i].offset, cv->data[i].value);
+}
+
+/*
+ * G70		0x47
+ * G71		0x49
+ * NV45		0x48
+ * G72[M]	0x46
+ * G73		0x4b
+ * C51_G7X	0x4c
+ * C51		0x4e
+ */
+int
+nv40_graph_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv =
+		(struct drm_nouveau_private *)dev->dev_private;
+	uint32_t vramsz, tmp;
+	int i, j;
+
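+	/* Reset PGRAPH by pulsing its bit in the master enable register. */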
+	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
+			~NV_PMC_ENABLE_PGRAPH);
+	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
+			 NV_PMC_ENABLE_PGRAPH);
+
+	nv40_grctx_init(dev);
+
+	/* No context present currently */
+	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
+
+	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
+	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
+	nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
+	nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
+
+	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
+
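+	/* Write the index of the lowest set bit of the unit mask at 0x1540
+	 * to 0x405000.
+	 */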
+	j = nv_rd32(dev, 0x1540) & 0xff;
+	if (j) {
+		for (i = 0; !(j & 1); j >>= 1, i++)
+			;
+		nv_wr32(dev, 0x405000, i);
+	}
+
+	if (dev_priv->chipset == 0x40) {
+		nv_wr32(dev, 0x4009b0, 0x83280fff);
+		nv_wr32(dev, 0x4009b4, 0x000000a0);
+	} else {
+		nv_wr32(dev, 0x400820, 0x83280eff);
+		nv_wr32(dev, 0x400824, 0x000000a0);
+	}
+
+	switch (dev_priv->chipset) {
+	case 0x40:
+	case 0x45:
+		nv_wr32(dev, 0x4009b8, 0x0078e366);
+		nv_wr32(dev, 0x4009bc, 0x0000014c);
+		break;
+	case 0x41:
+	case 0x42: /* pciid also 0x00Cx */
+	/* case 0x0120: XXX (pciid) */
+		nv_wr32(dev, 0x400828, 0x007596ff);
+		nv_wr32(dev, 0x40082c, 0x00000108);
+		break;
+	case 0x43:
+		nv_wr32(dev, 0x400828, 0x0072cb77);
+		nv_wr32(dev, 0x40082c, 0x00000108);
+		break;
+	case 0x44:
+	case 0x46: /* G72 */
+	case 0x4a:
+	case 0x4c: /* G7x-based C51 */
+	case 0x4e:
+		nv_wr32(dev, 0x400860, 0);
+		nv_wr32(dev, 0x400864, 0);
+		break;
+	case 0x47: /* G70 */
+	case 0x49: /* G71 */
+	case 0x4b: /* G73 */
+		nv_wr32(dev, 0x400828, 0x07830610);
+		nv_wr32(dev, 0x40082c, 0x0000016A);
+		break;
+	default:
+		break;
+	}
+
+	nv_wr32(dev, 0x400b38, 0x2ffff800);
+	nv_wr32(dev, 0x400b3c, 0x00006000);
+
+	/* copy tile info from PFB */
+	switch (dev_priv->chipset) {
+	case 0x40: /* vanilla NV40 */
+		for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
+			tmp = nv_rd32(dev, NV10_PFB_TILE(i));
+			nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
+			nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
+			tmp = nv_rd32(dev, NV10_PFB_TLIMIT(i));
+			nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
+			nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
+			tmp = nv_rd32(dev, NV10_PFB_TSIZE(i));
+			nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
+			nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
+			tmp = nv_rd32(dev, NV10_PFB_TSTATUS(i));
+			nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
+			nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
+		}
+		break;
+	case 0x44:
+	case 0x4a:
+	case 0x4e: /* NV44-based cores don't have 0x406900? */
+		for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
+			tmp = nv_rd32(dev, NV40_PFB_TILE(i));
+			nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
+			tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
+			nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
+			tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
+			nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
+			tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
+			nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
+		}
+		break;
+	case 0x46:
+	case 0x47:
+	case 0x49:
+	case 0x4b: /* G7X-based cores */
+		for (i = 0; i < NV40_PFB_TILE__SIZE_1; i++) {
+			tmp = nv_rd32(dev, NV40_PFB_TILE(i));
+			nv_wr32(dev, NV47_PGRAPH_TILE0(i), tmp);
+			nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
+			tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
+			nv_wr32(dev, NV47_PGRAPH_TLIMIT0(i), tmp);
+			nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
+			tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
+			nv_wr32(dev, NV47_PGRAPH_TSIZE0(i), tmp);
+			nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
+			tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
+			nv_wr32(dev, NV47_PGRAPH_TSTATUS0(i), tmp);
+			nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
+		}
+		break;
+	default: /* everything else */
+		for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
+			tmp = nv_rd32(dev, NV40_PFB_TILE(i));
+			nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
+			nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
+			tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
+			nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
+			nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
+			tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
+			nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
+			nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
+			tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
+			nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
+			nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
+		}
+		break;
+	}
+
+	/* begin RAM config */
+	vramsz = drm_get_resource_len(dev, 0) - 1;
+	switch (dev_priv->chipset) {
+	case 0x40:
+		nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
+		nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
+		nv_wr32(dev, 0x4069A4, nv_rd32(dev, NV04_PFB_CFG0));
+		nv_wr32(dev, 0x4069A8, nv_rd32(dev, NV04_PFB_CFG1));
+		nv_wr32(dev, 0x400820, 0);
+		nv_wr32(dev, 0x400824, 0);
+		nv_wr32(dev, 0x400864, vramsz);
+		nv_wr32(dev, 0x400868, vramsz);
+		break;
+	default:
+		switch (dev_priv->chipset) {
+		case 0x46:
+		case 0x47:
+		case 0x49:
+		case 0x4b:
+			nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
+			nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
+			break;
+		default:
+			nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
+			nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
+			break;
+		}
+		nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
+		nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
+		nv_wr32(dev, 0x400840, 0);
+		nv_wr32(dev, 0x400844, 0);
+		nv_wr32(dev, 0x4008A0, vramsz);
+		nv_wr32(dev, 0x4008A4, vramsz);
+		break;
+	}
+
+	return 0;
+}
+
+void
+nv40_graph_takedown(struct drm_device *dev)
+{
+}
+
+struct nouveau_pgraph_object_class nv40_graph_grclass[] = {
+	{ 0x0030, false, NULL }, /* null */
+	{ 0x0039, false, NULL }, /* m2mf */
+	{ 0x004a, false, NULL }, /* gdirect */
+	{ 0x009f, false, NULL }, /* imageblit (nv12) */
+	{ 0x008a, false, NULL }, /* ifc */
+	{ 0x0089, false, NULL }, /* sifm */
+	{ 0x3089, false, NULL }, /* sifm (nv40) */
+	{ 0x0062, false, NULL }, /* surf2d */
+	{ 0x3062, false, NULL }, /* surf2d (nv40) */
+	{ 0x0043, false, NULL }, /* rop */
+	{ 0x0012, false, NULL }, /* beta1 */
+	{ 0x0072, false, NULL }, /* beta4 */
+	{ 0x0019, false, NULL }, /* cliprect */
+	{ 0x0044, false, NULL }, /* pattern */
+	{ 0x309e, false, NULL }, /* swzsurf */
+	{ 0x4097, false, NULL }, /* curie (nv40) */
+	{ 0x4497, false, NULL }, /* curie (nv44) */
+	{}
+};
diff --git a/drivers/gpu/drm/nouveau/nv40_mc.c b/drivers/gpu/drm/nouveau/nv40_mc.c
new file mode 100644
index 0000000..2a3495e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_mc.c
@@ -0,0 +1,38 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv40_mc_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t tmp;
+
+	/* Power up everything, resetting each individual unit will
+	 * be done later if needed.
+	 */
+	nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
+
+	switch (dev_priv->chipset) {
+	case 0x44:
+	case 0x46: /* G72 */
+	case 0x4e:
+	case 0x4c: /* C51_G7X */
+		tmp = nv_rd32(dev, NV40_PFB_020C);
+		nv_wr32(dev, NV40_PMC_1700, tmp);
+		nv_wr32(dev, NV40_PMC_1704, 0);
+		nv_wr32(dev, NV40_PMC_1708, 0);
+		nv_wr32(dev, NV40_PMC_170C, tmp);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+void
+nv40_mc_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
new file mode 100644
index 0000000..f8e28a1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -0,0 +1,769 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_mode.h"
+#include "drm_crtc_helper.h"
+
+#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
+#include "nouveau_reg.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
+#include "nouveau_fb.h"
+#include "nouveau_connector.h"
+#include "nv50_display.h"
+
+static void
+nv50_crtc_lut_load(struct drm_crtc *crtc)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
+	int i;
+
+	NV_DEBUG(crtc->dev, "\n");
+
+	for (i = 0; i < 256; i++) {
+		writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0);
+		writew(nv_crtc->lut.g[i] >> 2, lut + 8*i + 2);
+		writew(nv_crtc->lut.b[i] >> 2, lut + 8*i + 4);
+	}
+
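+	/* Depth-30 (10 bpc) LUTs have a 257th entry; duplicate the last
+	 * value into it.
+	 */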
+	if (nv_crtc->lut.depth == 30) {
+		writew(nv_crtc->lut.r[i - 1] >> 2, lut + 8*i + 0);
+		writew(nv_crtc->lut.g[i - 1] >> 2, lut + 8*i + 2);
+		writew(nv_crtc->lut.b[i - 1] >> 2, lut + 8*i + 4);
+	}
+}
+
+int
+nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
+{
+	struct drm_device *dev = nv_crtc->base.dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *evo = dev_priv->evo;
+	int index = nv_crtc->index, ret;
+
+	NV_DEBUG(dev, "index %d\n", nv_crtc->index);
+	NV_DEBUG(dev, "%s\n", blanked ? "blanked" : "unblanked");
+
+	if (blanked) {
+		nv_crtc->cursor.hide(nv_crtc, false);
+
+		ret = RING_SPACE(evo, dev_priv->chipset != 0x50 ? 7 : 5);
+		if (ret) {
+			NV_ERROR(dev, "no space while blanking crtc\n");
+			return ret;
+		}
+		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
+		OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK);
+		OUT_RING(evo, 0);
+		if (dev_priv->chipset != 0x50) {
+			BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
+			OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE);
+		}
+
+		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
+		OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
+	} else {
+		if (nv_crtc->cursor.visible)
+			nv_crtc->cursor.show(nv_crtc, false);
+		else
+			nv_crtc->cursor.hide(nv_crtc, false);
+
+		ret = RING_SPACE(evo, dev_priv->chipset != 0x50 ? 10 : 8);
+		if (ret) {
+			NV_ERROR(dev, "no space while unblanking crtc\n");
+			return ret;
+		}
+		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
+		OUT_RING(evo, nv_crtc->lut.depth == 8 ?
+				NV50_EVO_CRTC_CLUT_MODE_OFF :
+				NV50_EVO_CRTC_CLUT_MODE_ON);
+		OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.mm_node->start <<
+				 PAGE_SHIFT) >> 8);
+		if (dev_priv->chipset != 0x50) {
+			BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
+			OUT_RING(evo, NvEvoVRAM);
+		}
+
+		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2);
+		OUT_RING(evo, nv_crtc->fb.offset >> 8);
+		OUT_RING(evo, 0);
+		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
+		if (dev_priv->chipset != 0x50) {
+			if (nv_crtc->fb.tile_flags == 0x7a00)
+				OUT_RING(evo, NvEvoFB32);
+			else if (nv_crtc->fb.tile_flags == 0x7000)
+				OUT_RING(evo, NvEvoFB16);
+			else
+				OUT_RING(evo, NvEvoVRAM);
+		} else {
+			OUT_RING(evo, NvEvoVRAM);
+		}
+	}
+
+	nv_crtc->fb.blanked = blanked;
+	return 0;
+}
+
+static int
+nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
+{
+	struct drm_device *dev = nv_crtc->base.dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *evo = dev_priv->evo;
+	int ret;
+
+	NV_DEBUG(dev, "\n");
+
+	ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
+	if (ret) {
+		NV_ERROR(dev, "no space while setting dither\n");
+		return ret;
+	}
+
+	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DITHER_CTRL), 1);
+	if (on)
+		OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_ON);
+	else
+		OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_OFF);
+
+	if (update) {
+		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+		OUT_RING(evo, 0);
+		FIRE_RING(evo);
+	}
+
+	return 0;
+}
+
+struct nouveau_connector *
+nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
+{
+	struct drm_device *dev = nv_crtc->base.dev;
+	struct drm_connector *connector;
+	struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
+
+	/* The safest approach is to find an encoder with the right crtc that
+	 * is also linked to a connector. */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (connector->encoder)
+			if (connector->encoder->crtc == crtc)
+				return nouveau_connector(connector);
+	}
+
+	return NULL;
+}
+
+static int
+nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
+{
+	struct nouveau_connector *nv_connector =
+		nouveau_crtc_connector_get(nv_crtc);
+	struct drm_device *dev = nv_crtc->base.dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *evo = dev_priv->evo;
+	struct drm_display_mode *native_mode = NULL;
+	struct drm_display_mode *mode = &nv_crtc->base.mode;
+	uint32_t outX, outY, horiz, vert;
+	int ret;
+
+	NV_DEBUG(dev, "\n");
+
+	switch (scaling_mode) {
+	case DRM_MODE_SCALE_NONE:
+		break;
+	default:
+		if (!nv_connector || !nv_connector->native_mode) {
+			NV_ERROR(dev, "No native mode, forcing panel scaling\n");
+			scaling_mode = DRM_MODE_SCALE_NONE;
+		} else {
+			native_mode = nv_connector->native_mode;
+		}
+		break;
+	}
+
+	switch (scaling_mode) {
+	case DRM_MODE_SCALE_ASPECT:
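+		/* Scale factors with 19 fractional bits; use the smaller of
+		 * the two so the scaled image still fits the native mode.
+		 */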
+		horiz = (native_mode->hdisplay << 19) / mode->hdisplay;
+		vert = (native_mode->vdisplay << 19) / mode->vdisplay;
+
+		if (vert > horiz) {
+			outX = (mode->hdisplay * horiz) >> 19;
+			outY = (mode->vdisplay * horiz) >> 19;
+		} else {
+			outX = (mode->hdisplay * vert) >> 19;
+			outY = (mode->vdisplay * vert) >> 19;
+		}
+		break;
+	case DRM_MODE_SCALE_FULLSCREEN:
+		outX = native_mode->hdisplay;
+		outY = native_mode->vdisplay;
+		break;
+	case DRM_MODE_SCALE_CENTER:
+	case DRM_MODE_SCALE_NONE:
+	default:
+		outX = mode->hdisplay;
+		outY = mode->vdisplay;
+		break;
+	}
+
+	ret = RING_SPACE(evo, update ? 7 : 5);
+	if (ret)
+		return ret;
+
+	/* Got a better name for SCALER_ACTIVE?  One day I've got to really
+	 * figure out why this is needed.
+	 */
+	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
+	if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ||
+	    (mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+	    mode->hdisplay != outX || mode->vdisplay != outY) {
+		OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_ACTIVE);
+	} else {
+		OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_INACTIVE);
+	}
+
+	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
+	OUT_RING(evo, outY << 16 | outX);
+	OUT_RING(evo, outY << 16 | outX);
+
+	if (update) {
+		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+		OUT_RING(evo, 0);
+		FIRE_RING(evo);
+	}
+
+	return 0;
+}
+
+int
+nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
+{
+	uint32_t pll_reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
+	struct nouveau_pll_vals pll;
+	struct pll_lims limits;
+	uint32_t reg1, reg2;
+	int ret;
+
+	ret = get_pll_limits(dev, pll_reg, &limits);
+	if (ret)
+		return ret;
+
+	ret = nouveau_calc_pll_mnp(dev, &limits, pclk, &pll);
+	if (ret <= 0)
+		return ret;
+
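+	/* PLLs with a second VCO are programmed in two stages (M1/N1 and
+	 * M2/N2 plus the post-divider); single-stage PLLs take M1, N1 and
+	 * the post-divider in a single register.
+	 */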
+	if (limits.vco2.maxfreq) {
+		reg1 = nv_rd32(dev, pll_reg + 4) & 0xff00ff00;
+		reg2 = nv_rd32(dev, pll_reg + 8) & 0x8000ff00;
+		nv_wr32(dev, pll_reg, 0x10000611);
+		nv_wr32(dev, pll_reg + 4, reg1 | (pll.M1 << 16) | pll.N1);
+		nv_wr32(dev, pll_reg + 8,
+			reg2 | (pll.log2P << 28) | (pll.M2 << 16) | pll.N2);
+	} else {
+		reg1 = nv_rd32(dev, pll_reg + 4) & 0xffc00000;
+		nv_wr32(dev, pll_reg, 0x50000610);
+		nv_wr32(dev, pll_reg + 4, reg1 |
+			(pll.log2P << 16) | (pll.M1 << 8) | pll.N1);
+	}
+
+	return 0;
+}
+
+static void
+nv50_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct drm_device *dev;
+	struct nouveau_crtc *nv_crtc;
+
+	if (!crtc)
+		return;
+
+	dev = crtc->dev;
+	nv_crtc = nouveau_crtc(crtc);
+
+	NV_DEBUG(dev, "\n");
+
+	drm_crtc_cleanup(&nv_crtc->base);
+
+	nv50_cursor_fini(nv_crtc);
+
+	nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
+	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+	kfree(nv_crtc->mode);
+	kfree(nv_crtc);
+}
+
+int
+nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+		     uint32_t buffer_handle, uint32_t width, uint32_t height)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nouveau_bo *cursor = NULL;
+	struct drm_gem_object *gem;
+	int ret = 0, i;
+
+	if (width != 64 || height != 64)
+		return -EINVAL;
+
+	if (!buffer_handle) {
+		nv_crtc->cursor.hide(nv_crtc, true);
+		return 0;
+	}
+
+	gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
+	if (!gem)
+		return -EINVAL;
+	cursor = nouveau_gem_object(gem);
+
+	ret = nouveau_bo_map(cursor);
+	if (ret)
+		goto out;
+
+	/* The simple approach will do for now: copy the cursor image one
+	 * dword at a time through the CPU mapping.
+	 */
+	for (i = 0; i < 64 * 64; i++)
+		nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, nouveau_bo_rd32(cursor, i));
+
+	nouveau_bo_unmap(cursor);
+
+	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset -
+					    dev_priv->vm_vram_base);
+	nv_crtc->cursor.show(nv_crtc, true);
+
+out:
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_unreference(gem);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+int
+nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+	nv_crtc->cursor.set_pos(nv_crtc, x, y);
+	return 0;
+}
+
+static void
+nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+		    uint32_t size)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	int i;
+
+	if (size != 256)
+		return;
+
+	for (i = 0; i < 256; i++) {
+		nv_crtc->lut.r[i] = r[i];
+		nv_crtc->lut.g[i] = g[i];
+		nv_crtc->lut.b[i] = b[i];
+	}
+
+	/* We need to know the depth before we upload, but it's possible to
+	 * get called before a framebuffer is bound.  If this is the case,
+	 * mark the lut values as dirty by setting depth==0, and it'll be
+	 * uploaded on the first mode_set_base()
+	 */
+	if (!nv_crtc->base.fb) {
+		nv_crtc->lut.depth = 0;
+		return;
+	}
+
+	nv50_crtc_lut_load(crtc);
+}
+
+static void
+nv50_crtc_save(struct drm_crtc *crtc)
+{
+	NV_ERROR(crtc->dev, "!!\n");
+}
+
+static void
+nv50_crtc_restore(struct drm_crtc *crtc)
+{
+	NV_ERROR(crtc->dev, "!!\n");
+}
+
+static const struct drm_crtc_funcs nv50_crtc_funcs = {
+	.save = nv50_crtc_save,
+	.restore = nv50_crtc_restore,
+	.cursor_set = nv50_crtc_cursor_set,
+	.cursor_move = nv50_crtc_cursor_move,
+	.gamma_set = nv50_crtc_gamma_set,
+	.set_config = drm_crtc_helper_set_config,
+	.destroy = nv50_crtc_destroy,
+};
+
+static void
+nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+}
+
+static void
+nv50_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_encoder *encoder;
+
+	NV_DEBUG(dev, "index %d\n", nv_crtc->index);
+
+	/* Disconnect all unused encoders. */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+		if (drm_helper_encoder_in_use(encoder))
+			continue;
+
+		nv_encoder->disconnect(nv_encoder);
+	}
+
+	nv50_crtc_blank(nv_crtc, true);
+}
+
+static void
+nv50_crtc_commit(struct drm_crtc *crtc)
+{
+	struct drm_crtc *crtc2;
+	struct drm_device *dev = crtc->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *evo = dev_priv->evo;
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	int ret;
+
+	NV_DEBUG(dev, "index %d\n", nv_crtc->index);
+
+	nv50_crtc_blank(nv_crtc, false);
+
+	/* Explicitly blank all unused crtc's. */
+	list_for_each_entry(crtc2, &dev->mode_config.crtc_list, head) {
+		if (!drm_helper_crtc_in_use(crtc2))
+			nv50_crtc_blank(nouveau_crtc(crtc2), true);
+	}
+
+	ret = RING_SPACE(evo, 2);
+	if (ret) {
+		NV_ERROR(dev, "no space while committing crtc\n");
+		return;
+	}
+	BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+	OUT_RING(evo, 0);
+	FIRE_RING(evo);
+}
+
+static bool
+nv50_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
+		     struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static int
+nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y,
+			   struct drm_framebuffer *old_fb, bool update)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct drm_device *dev = nv_crtc->base.dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *evo = dev_priv->evo;
+	struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
+	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+	int ret, format;
+
+	NV_DEBUG(dev, "index %d\n", nv_crtc->index);
+
+	switch (drm_fb->depth) {
+	case  8:
+		format = NV50_EVO_CRTC_FB_DEPTH_8;
+		break;
+	case 15:
+		format = NV50_EVO_CRTC_FB_DEPTH_15;
+		break;
+	case 16:
+		format = NV50_EVO_CRTC_FB_DEPTH_16;
+		break;
+	case 24:
+	case 32:
+		format = NV50_EVO_CRTC_FB_DEPTH_24;
+		break;
+	case 30:
+		format = NV50_EVO_CRTC_FB_DEPTH_30;
+		break;
+	default:
+		 NV_ERROR(dev, "unknown depth %d\n", drm_fb->depth);
+		 return -EINVAL;
+	}
+
+	ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
+	if (ret)
+		return ret;
+
+	if (old_fb) {
+		struct nouveau_framebuffer *ofb = nouveau_framebuffer(old_fb);
+		nouveau_bo_unpin(ofb->nvbo);
+	}
+
+	nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base;
+	nv_crtc->fb.tile_flags = fb->nvbo->tile_flags;
+	nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
+	if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
+		ret = RING_SPACE(evo, 2);
+		if (ret)
+			return ret;
+
+		BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
+		if (nv_crtc->fb.tile_flags == 0x7a00)
+			OUT_RING(evo, NvEvoFB32);
+		else if (nv_crtc->fb.tile_flags == 0x7000)
+			OUT_RING(evo, NvEvoFB16);
+		else
+			OUT_RING(evo, NvEvoVRAM);
+	}
+
+	ret = RING_SPACE(evo, 12);
+	if (ret)
+		return ret;
+
+	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
+	OUT_RING(evo, nv_crtc->fb.offset >> 8);
+	OUT_RING(evo, 0);
+	OUT_RING(evo, (drm_fb->height << 16) | drm_fb->width);
+	if (!nv_crtc->fb.tile_flags) {
+		OUT_RING(evo, drm_fb->pitch | (1 << 20));
+	} else {
+		OUT_RING(evo, ((drm_fb->pitch / 4) << 4) |
+				  fb->nvbo->tile_mode);
+	}
+	if (dev_priv->chipset == 0x50)
+		OUT_RING(evo, (fb->nvbo->tile_flags << 8) | format);
+	else
+		OUT_RING(evo, format);
+
+	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
+	OUT_RING(evo, fb->base.depth == 8 ?
+		 NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
+
+	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
+	OUT_RING(evo, NV50_EVO_CRTC_COLOR_CTRL_COLOR);
+	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
+	OUT_RING(evo, (y << 16) | x);
+
+	if (nv_crtc->lut.depth != fb->base.depth) {
+		nv_crtc->lut.depth = fb->base.depth;
+		nv50_crtc_lut_load(crtc);
+	}
+
+	if (update) {
+		ret = RING_SPACE(evo, 2);
+		if (ret)
+			return ret;
+		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+		OUT_RING(evo, 0);
+		FIRE_RING(evo);
+	}
+
+	return 0;
+}
+
+static int
+nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+		   struct drm_display_mode *adjusted_mode, int x, int y,
+		   struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *evo = dev_priv->evo;
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nouveau_connector *nv_connector = NULL;
+	uint32_t hsync_dur,  vsync_dur, hsync_start_to_end, vsync_start_to_end;
+	uint32_t hunk1, vunk1, vunk2a, vunk2b;
+	int ret;
+
+	/* Find the connector attached to this CRTC */
+	nv_connector = nouveau_crtc_connector_get(nv_crtc);
+
+	*nv_crtc->mode = *adjusted_mode;
+
+	NV_DEBUG(dev, "index %d\n", nv_crtc->index);
+
+	hsync_dur = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
+	vsync_dur = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
+	hsync_start_to_end = adjusted_mode->htotal - adjusted_mode->hsync_start;
+	vsync_start_to_end = adjusted_mode->vtotal - adjusted_mode->vsync_start;
+	/* I can't give this a proper name; can anyone else? */
+	hunk1 = adjusted_mode->htotal -
+		adjusted_mode->hsync_start + adjusted_mode->hdisplay;
+	vunk1 = adjusted_mode->vtotal -
+		adjusted_mode->vsync_start + adjusted_mode->vdisplay;
+	/* Another strange value, this time only for interlaced modes. */
+	vunk2a = 2 * adjusted_mode->vtotal -
+		 adjusted_mode->vsync_start + adjusted_mode->vdisplay;
+	vunk2b = adjusted_mode->vtotal -
+		 adjusted_mode->vsync_start + adjusted_mode->vtotal;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		vsync_dur /= 2;
+		vsync_start_to_end  /= 2;
+		vunk1 /= 2;
+		vunk2a /= 2;
+		vunk2b /= 2;
+		/* magic */
+		if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+			vsync_start_to_end -= 1;
+			vunk1 -= 1;
+			vunk2a -= 1;
+			vunk2b -= 1;
+		}
+	}
+
+	ret = RING_SPACE(evo, 17);
+	if (ret)
+		return ret;
+
+	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLOCK), 2);
+	OUT_RING(evo, adjusted_mode->clock | 0x800000);
+	OUT_RING(evo, (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 0);
+
+	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DISPLAY_START), 5);
+	OUT_RING(evo, 0);
+	OUT_RING(evo, (adjusted_mode->vtotal << 16) | adjusted_mode->htotal);
+	OUT_RING(evo, (vsync_dur - 1) << 16 | (hsync_dur - 1));
+	OUT_RING(evo, (vsync_start_to_end - 1) << 16 |
+			(hsync_start_to_end - 1));
+	OUT_RING(evo, (vunk1 - 1) << 16 | (hunk1 - 1));
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK0824), 1);
+		OUT_RING(evo, (vunk2b - 1) << 16 | (vunk2a - 1));
+	} else {
+		OUT_RING(evo, 0);
+		OUT_RING(evo, 0);
+	}
+
+	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK082C), 1);
+	OUT_RING(evo, 0);
+
+	/* This is the actual resolution of the mode. */
+	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, REAL_RES), 1);
+	OUT_RING(evo, (mode->vdisplay << 16) | mode->hdisplay);
+	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CENTER_OFFSET), 1);
+	OUT_RING(evo, NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(0, 0));
+
+	if (nv_connector) {
+		nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering,
+				    false);
+		nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode,
+				   false);
+	}
+
+	return nv50_crtc_do_mode_set_base(crtc, x, y, old_fb, false);
+}
+
+static int
+nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+			struct drm_framebuffer *old_fb)
+{
+	return nv50_crtc_do_mode_set_base(crtc, x, y, old_fb, true);
+}
+
+static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
+	.dpms = nv50_crtc_dpms,
+	.prepare = nv50_crtc_prepare,
+	.commit = nv50_crtc_commit,
+	.mode_fixup = nv50_crtc_mode_fixup,
+	.mode_set = nv50_crtc_mode_set,
+	.mode_set_base = nv50_crtc_mode_set_base,
+	.load_lut = nv50_crtc_lut_load,
+};
+
+int
+nv50_crtc_create(struct drm_device *dev, int index)
+{
+	struct nouveau_crtc *nv_crtc = NULL;
+	int ret, i;
+
+	NV_DEBUG(dev, "\n");
+
+	nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
+	if (!nv_crtc)
+		return -ENOMEM;
+
+	nv_crtc->mode = kzalloc(sizeof(*nv_crtc->mode), GFP_KERNEL);
+	if (!nv_crtc->mode) {
+		kfree(nv_crtc);
+		return -ENOMEM;
+	}
+
+	/* Default CLUT parameters, will be activated on the hw upon
+	 * first mode set.
+	 */
+	for (i = 0; i < 256; i++) {
+		nv_crtc->lut.r[i] = i << 8;
+		nv_crtc->lut.g[i] = i << 8;
+		nv_crtc->lut.b[i] = i << 8;
+	}
+	nv_crtc->lut.depth = 0;
+
+	ret = nouveau_bo_new(dev, NULL, 4096, 0x100, TTM_PL_FLAG_VRAM,
+			     0, 0x0000, false, true, &nv_crtc->lut.nvbo);
+	if (!ret) {
+		ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
+		if (!ret)
+			ret = nouveau_bo_map(nv_crtc->lut.nvbo);
+		if (ret)
+			nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
+	}
+
+	if (ret) {
+		kfree(nv_crtc->mode);
+		kfree(nv_crtc);
+		return ret;
+	}
+
+	nv_crtc->index = index;
+
+	/* set function pointers */
+	nv_crtc->set_dither = nv50_crtc_set_dither;
+	nv_crtc->set_scale = nv50_crtc_set_scale;
+
+	drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
+	drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
+	drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
+
+	ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
+			     0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
+	if (!ret) {
+		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
+		if (!ret)
+			ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
+		if (ret)
+			nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+	}
+
+	nv50_cursor_init(nv_crtc);
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
new file mode 100644
index 0000000..e2e79a8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_mode.h"
+
+#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
+#include "nouveau_reg.h"
+#include "nouveau_drv.h"
+#include "nouveau_crtc.h"
+#include "nv50_display.h"
+
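+/* For both show and hide, "update" commits the change to the hardware
+ * immediately and records the new visibility; callers batching several
+ * EVO methods pass false and fire the ring themselves.
+ */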
+static void
+nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
+{
+	struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
+	struct nouveau_channel *evo = dev_priv->evo;
+	struct drm_device *dev = nv_crtc->base.dev;
+	int ret;
+
+	NV_DEBUG(dev, "\n");
+
+	if (update && nv_crtc->cursor.visible)
+		return;
+
+	ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) + update * 2);
+	if (ret) {
+		NV_ERROR(dev, "no space while unhiding cursor\n");
+		return;
+	}
+
+	if (dev_priv->chipset != 0x50) {
+		BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
+		OUT_RING(evo, NvEvoVRAM);
+	}
+	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
+	OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW);
+	OUT_RING(evo, nv_crtc->cursor.offset >> 8);
+
+	if (update) {
+		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+		OUT_RING(evo, 0);
+		FIRE_RING(evo);
+		nv_crtc->cursor.visible = true;
+	}
+}
+
+static void
+nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
+{
+	struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
+	struct nouveau_channel *evo = dev_priv->evo;
+	struct drm_device *dev = nv_crtc->base.dev;
+	int ret;
+
+	NV_DEBUG(dev, "\n");
+
+	if (update && !nv_crtc->cursor.visible)
+		return;
+
+	ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) + update * 2);
+	if (ret) {
+		NV_ERROR(dev, "no space while hiding cursor\n");
+		return;
+	}
+	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
+	OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
+	OUT_RING(evo, 0);
+	if (dev_priv->chipset != 0x50) {
+		BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
+		OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
+	}
+
+	if (update) {
+		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+		OUT_RING(evo, 0);
+		FIRE_RING(evo);
+		nv_crtc->cursor.visible = false;
+	}
+}
+
+static void
+nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
+{
+	struct drm_device *dev = nv_crtc->base.dev;
+
+	nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index),
+		((y & 0xFFFF) << 16) | (x & 0xFFFF));
+	/* Needed to make the cursor move. */
+	nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0);
+}
+
+static void
+nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
+{
+	NV_DEBUG(nv_crtc->base.dev, "\n");
+	if (offset == nv_crtc->cursor.offset)
+		return;
+
+	nv_crtc->cursor.offset = offset;
+	if (nv_crtc->cursor.visible) {
+		nv_crtc->cursor.visible = false;
+		nv_crtc->cursor.show(nv_crtc, true);
+	}
+}
+
+int
+nv50_cursor_init(struct nouveau_crtc *nv_crtc)
+{
+	nv_crtc->cursor.set_offset = nv50_cursor_set_offset;
+	nv_crtc->cursor.set_pos = nv50_cursor_set_pos;
+	nv_crtc->cursor.hide = nv50_cursor_hide;
+	nv_crtc->cursor.show = nv50_cursor_show;
+	return 0;
+}
+
+void
+nv50_cursor_fini(struct nouveau_crtc *nv_crtc)
+{
+	struct drm_device *dev = nv_crtc->base.dev;
+	int idx = nv_crtc->index;
+
+	NV_DEBUG(dev, "\n");
+
+	nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0);
+	if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx),
+		     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
+		NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
+		NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
+			 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx)));
+	}
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
new file mode 100644
index 0000000..fb5838e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+
+#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
+#include "nouveau_reg.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_crtc.h"
+#include "nv50_display.h"
+
+static void
+nv50_dac_disconnect(struct nouveau_encoder *nv_encoder)
+{
+	struct drm_device *dev = to_drm_encoder(nv_encoder)->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *evo = dev_priv->evo;
+	int ret;
+
+	NV_DEBUG(dev, "Disconnecting DAC %d\n", nv_encoder->or);
+
+	ret = RING_SPACE(evo, 2);
+	if (ret) {
+		NV_ERROR(dev, "no space while disconnecting DAC\n");
+		return;
+	}
+	BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
+	OUT_RING(evo, 0);
+}
+
+static enum drm_connector_status
+nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	enum drm_connector_status status = connector_status_disconnected;
+	uint32_t dpms_state, load_pattern, load_state;
+	int or = nv_encoder->or;
+
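+	/* Classic CRT load detection: power the DAC up, drive a test
+	 * voltage onto the lines and check whether a load is reported.
+	 */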
+	nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001);
+	dpms_state = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or));
+
+	nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
+		0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
+	if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
+		     NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
+		NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
+		NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
+			  nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
+		return status;
+	}
+
+	/* Use the BIOS-provided value if possible. */
+	if (dev_priv->vbios->dactestval) {
+		load_pattern = dev_priv->vbios->dactestval;
+		NV_DEBUG(dev, "Using bios provided load_pattern of %d\n",
+			  load_pattern);
+	} else {
+		load_pattern = 340;
+		NV_DEBUG(dev, "Using default load_pattern of %d\n",
+			 load_pattern);
+	}
+
+	nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or),
+		NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern);
+	mdelay(45); /* give it some time to process */
+	load_state = nv_rd32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or));
+
+	nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0);
+	nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state |
+		NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
+
+	if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) ==
+			  NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT)
+		status = connector_status_connected;
+
+	if (status == connector_status_connected)
+		NV_DEBUG(dev, "Load was detected on output with or %d\n", or);
+	else
+		NV_DEBUG(dev, "Load was not detected on output with or %d\n", or);
+
+	return status;
+}
+
+static void
+nv50_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	uint32_t val;
+	int or = nv_encoder->or;
+
+	NV_DEBUG(dev, "or %d mode %d\n", or, mode);
+
+	/* wait for it to be done */
+	if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
+		     NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
+		NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
+		NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
+			 nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
+		return;
+	}
+
+	val = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F;
+
+	if (mode != DRM_MODE_DPMS_ON)
+		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_STANDBY:
+		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
+		break;
+	case DRM_MODE_DPMS_SUSPEND:
+		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
+		break;
+	case DRM_MODE_DPMS_OFF:
+		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_OFF;
+		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
+		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
+		break;
+	default:
+		break;
+	}
+
+	nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val |
+		NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
+}
+
+static void
+nv50_dac_save(struct drm_encoder *encoder)
+{
+	NV_ERROR(encoder->dev, "!!\n");
+}
+
+static void
+nv50_dac_restore(struct drm_encoder *encoder)
+{
+	NV_ERROR(encoder->dev, "!!\n");
+}
+
+static bool
+nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+		    struct drm_display_mode *adjusted_mode)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_connector *connector;
+
+	NV_DEBUG(encoder->dev, "or %d\n", nv_encoder->or);
+
+	connector = nouveau_encoder_connector_get(nv_encoder);
+	if (!connector) {
+		NV_ERROR(encoder->dev, "Encoder has no connector\n");
+		return false;
+	}
+
+	if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
+	     connector->native_mode) {
+		int id = adjusted_mode->base.id;
+		*adjusted_mode = *connector->native_mode;
+		adjusted_mode->base.id = id;
+	}
+
+	return true;
+}
+
+static void
+nv50_dac_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_dac_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+		  struct drm_display_mode *adjusted_mode)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *evo = dev_priv->evo;
+	struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
+	uint32_t mode_ctl = 0, mode_ctl2 = 0;
+	int ret;
+
+	NV_DEBUG(dev, "or %d\n", nv_encoder->or);
+
+	nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
+
+	if (crtc->index == 1)
+		mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC1;
+	else
+		mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0;
+
+	/* Lacking a working tv-out, this is not 100% certain. */
+	if (nv_encoder->dcb->type == OUTPUT_ANALOG)
+		mode_ctl |= 0x40;
+	else
+	if (nv_encoder->dcb->type == OUTPUT_TV)
+		mode_ctl |= 0x100;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+		mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NHSYNC;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+		mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NVSYNC;
+
+	ret = RING_SPACE(evo, 3);
+	if (ret) {
+		NV_ERROR(dev, "no space while connecting DAC\n");
+		return;
+	}
+	BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
+	OUT_RING(evo, mode_ctl);
+	OUT_RING(evo, mode_ctl2);
+}
+
+static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
+	.dpms = nv50_dac_dpms,
+	.save = nv50_dac_save,
+	.restore = nv50_dac_restore,
+	.mode_fixup = nv50_dac_mode_fixup,
+	.prepare = nv50_dac_prepare,
+	.commit = nv50_dac_commit,
+	.mode_set = nv50_dac_mode_set,
+	.detect = nv50_dac_detect
+};
+
+static void
+nv50_dac_destroy(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder;
+
+	if (!encoder)
+		return;
+
+	nv_encoder = nouveau_encoder(encoder);
+
+	NV_DEBUG(encoder->dev, "\n");
+
+	drm_encoder_cleanup(encoder);
+	kfree(nv_encoder);
+}
+
+static const struct drm_encoder_funcs nv50_dac_encoder_funcs = {
+	.destroy = nv50_dac_destroy,
+};
+
+int
+nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry)
+{
+	struct nouveau_encoder *nv_encoder;
+	struct drm_encoder *encoder;
+
+	NV_DEBUG(dev, "\n");
+	NV_INFO(dev, "Detected a DAC output\n");
+
+	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+	if (!nv_encoder)
+		return -ENOMEM;
+	encoder = to_drm_encoder(nv_encoder);
+
+	nv_encoder->dcb = entry;
+	nv_encoder->or = ffs(entry->or) - 1;
+
+	nv_encoder->disconnect = nv50_dac_disconnect;
+
+	drm_encoder_init(dev, encoder, &nv50_dac_encoder_funcs,
+			 DRM_MODE_ENCODER_DAC);
+	drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs);
+
+	encoder->possible_crtcs = entry->heads;
+	encoder->possible_clones = 0;
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
new file mode 100644
index 0000000..12c5ee6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -0,0 +1,1015 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "nv50_display.h"
+#include "nouveau_crtc.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_fb.h"
+#include "drm_crtc_helper.h"
+
+static void
+nv50_evo_channel_del(struct nouveau_channel **pchan)
+{
+	struct nouveau_channel *chan = *pchan;
+
+	if (!chan)
+		return;
+	*pchan = NULL;
+
+	nouveau_gpuobj_channel_takedown(chan);
+	nouveau_bo_ref(NULL, &chan->pushbuf_bo);
+
+	if (chan->user)
+		iounmap(chan->user);
+
+	kfree(chan);
+}
+
+static int
+nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
+		    uint32_t tile_flags, uint32_t magic_flags,
+		    uint32_t offset, uint32_t limit)
+{
+	struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
+	struct drm_device *dev = evo->dev;
+	struct nouveau_gpuobj *obj = NULL;
+	int ret;
+
+	ret = nouveau_gpuobj_new(dev, evo, 6*4, 32, 0, &obj);
+	if (ret)
+		return ret;
+	obj->engine = NVOBJ_ENGINE_DISPLAY;
+
+	ret = nouveau_gpuobj_ref_add(dev, evo, name, obj, NULL);
+	if (ret) {
+		nouveau_gpuobj_del(dev, &obj);
+		return ret;
+	}
+
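+	/* Fill in the six-word DMA object: class and flags, limit, base
+	 * offset, and three further control words.
+	 */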
+	dev_priv->engine.instmem.prepare_access(dev, true);
+	nv_wo32(dev, obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
+	nv_wo32(dev, obj, 1, limit);
+	nv_wo32(dev, obj, 2, offset);
+	nv_wo32(dev, obj, 3, 0x00000000);
+	nv_wo32(dev, obj, 4, 0x00000000);
+	nv_wo32(dev, obj, 5, 0x00010000);
+	dev_priv->engine.instmem.finish_access(dev);
+
+	return 0;
+}
+
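+/* The EVO channel drives the display engine through a push buffer, much
+ * like a PFIFO channel: allocate PRAMIN for its objects, a RAMHT, DMA
+ * objects covering VRAM (plus the tiled FB formats on >NV50), a page of
+ * push buffer, and a mapping of the user control registers.
+ */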
+static int
+nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan;
+	int ret;
+
+	chan = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
+	if (!chan)
+		return -ENOMEM;
+	*pchan = chan;
+
+	chan->id = -1;
+	chan->dev = dev;
+	chan->user_get = 4;
+	chan->user_put = 0;
+
+	INIT_LIST_HEAD(&chan->ramht_refs);
+
+	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32768, 0x1000,
+				     NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
+	if (ret) {
+		NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
+		nv50_evo_channel_del(pchan);
+		return ret;
+	}
+
+	ret = nouveau_mem_init_heap(&chan->ramin_heap, chan->ramin->gpuobj->
+				    im_pramin->start, 32768);
+	if (ret) {
+		NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
+		nv50_evo_channel_del(pchan);
+		return ret;
+	}
+
+	ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 4096, 16,
+				     0, &chan->ramht);
+	if (ret) {
+		NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
+		nv50_evo_channel_del(pchan);
+		return ret;
+	}
+
+	if (dev_priv->chipset != 0x50) {
+		ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19,
+					  0, 0xffffffff);
+		if (ret) {
+			nv50_evo_channel_del(pchan);
+			return ret;
+		}
+
+		ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB32, 0x7a, 0x19,
+					  0, 0xffffffff);
+		if (ret) {
+			nv50_evo_channel_del(pchan);
+			return ret;
+		}
+	}
+
+	ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19,
+				  0, nouveau_mem_fb_amount(dev));
+	if (ret) {
+		nv50_evo_channel_del(pchan);
+		return ret;
+	}
+
+	ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
+			     false, true, &chan->pushbuf_bo);
+	if (ret == 0)
+		ret = nouveau_bo_pin(chan->pushbuf_bo, TTM_PL_FLAG_VRAM);
+	if (ret) {
+		NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
+		nv50_evo_channel_del(pchan);
+		return ret;
+	}
+
+	ret = nouveau_bo_map(chan->pushbuf_bo);
+	if (ret) {
+		NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
+		nv50_evo_channel_del(pchan);
+		return ret;
+	}
+
+	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+					NV50_PDISPLAY_USER(0), PAGE_SIZE);
+	if (!chan->user) {
+		NV_ERROR(dev, "Error mapping EVO control regs.\n");
+		nv50_evo_channel_del(pchan);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+int
+nv50_display_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+	struct nouveau_channel *evo = dev_priv->evo;
+	struct drm_connector *connector;
+	uint32_t val, ram_amount, hpd_en[2];
+	uint64_t start;
+	int ret, i;
+
+	NV_DEBUG(dev, "\n");
+
+	nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004));
+	/*
+	 * I think the 0x006101XX range is some kind of main control area
+	 * that enables things.
+	 */
+	/* CRTC? */
+	for (i = 0; i < 2; i++) {
+		val = nv_rd32(dev, 0x00616100 + (i * 0x800));
+		nv_wr32(dev, 0x00610190 + (i * 0x10), val);
+		val = nv_rd32(dev, 0x00616104 + (i * 0x800));
+		nv_wr32(dev, 0x00610194 + (i * 0x10), val);
+		val = nv_rd32(dev, 0x00616108 + (i * 0x800));
+		nv_wr32(dev, 0x00610198 + (i * 0x10), val);
+		val = nv_rd32(dev, 0x0061610c + (i * 0x800));
+		nv_wr32(dev, 0x0061019c + (i * 0x10), val);
+	}
+	/* DAC */
+	for (i = 0; i < 3; i++) {
+		val = nv_rd32(dev, 0x0061a000 + (i * 0x800));
+		nv_wr32(dev, 0x006101d0 + (i * 0x04), val);
+	}
+	/* SOR */
+	for (i = 0; i < 4; i++) {
+		val = nv_rd32(dev, 0x0061c000 + (i * 0x800));
+		nv_wr32(dev, 0x006101e0 + (i * 0x04), val);
+	}
+	/* Something not yet in use, tv-out maybe. */
+	for (i = 0; i < 3; i++) {
+		val = nv_rd32(dev, 0x0061e000 + (i * 0x800));
+		nv_wr32(dev, 0x006101f0 + (i * 0x04), val);
+	}
+
+	for (i = 0; i < 3; i++) {
+		nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 |
+			NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
+		nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
+	}
+
+	/* This used to be in crtc unblank, but seems out of place there. */
+	nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0);
+	/* RAM is clamped to 256 MiB. */
+	ram_amount = nouveau_mem_fb_amount(dev);
+	NV_DEBUG(dev, "ram_amount %d\n", ram_amount);
+	if (ram_amount > 256*1024*1024)
+		ram_amount = 256*1024*1024;
+	nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1);
+	nv_wr32(dev, NV50_PDISPLAY_UNK_388, 0x150000);
+	nv_wr32(dev, NV50_PDISPLAY_UNK_38C, 0);
+
+	/* The precise purpose is unknown; I suspect it has something to do
+	 * with text mode.
+	 */
+	if (nv_rd32(dev, NV50_PDISPLAY_INTR_1) & 0x100) {
+		nv_wr32(dev, NV50_PDISPLAY_INTR_1, 0x100);
+		nv_wr32(dev, 0x006194e8, nv_rd32(dev, 0x006194e8) & ~1);
+		if (!nv_wait(0x006194e8, 2, 0)) {
+			NV_ERROR(dev, "timeout: (0x6194e8 & 2) != 0\n");
+			NV_ERROR(dev, "0x6194e8 = 0x%08x\n",
+						nv_rd32(dev, 0x6194e8));
+			return -EBUSY;
+		}
+	}
+
+	/* taken from nv bug #12637, attempts to un-wedge the hw if it's
+	 * stuck in some unspecified state
+	 */
+	start = ptimer->read(dev);
+	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x2b00);
+	while ((val = nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))) & 0x1e0000) {
+		if ((val & 0x9f0000) == 0x20000)
+			nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
+							val | 0x800000);
+
+		if ((val & 0x3f0000) == 0x30000)
+			nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
+							val | 0x200000);
+
+		if (ptimer->read(dev) - start > 1000000000ULL) {
+			NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n");
+			NV_ERROR(dev, "0x610200 = 0x%08x\n", val);
+			return -EBUSY;
+		}
+	}
+
+	nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE);
+	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03);
+	if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x40000000, 0x40000000)) {
+		NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n");
+		NV_ERROR(dev, "0x610200 = 0x%08x\n",
+			  nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
+		return -EBUSY;
+	}
+
+	for (i = 0; i < 2; i++) {
+		nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
+		if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
+			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
+			NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
+			NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
+				 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
+			return -EBUSY;
+		}
+
+		nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
+			NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON);
+		if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
+			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS,
+			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) {
+			NV_ERROR(dev, "timeout: "
+				      "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i);
+			NV_ERROR(dev, "CURSOR_CTRL2(%d) = 0x%08x\n", i,
+				 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
+			return -EBUSY;
+		}
+	}
+
+	nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->instance >> 8) | 9);
+
+	/* initialise fifo */
+	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0),
+		((evo->pushbuf_bo->bo.mem.mm_node->start << PAGE_SHIFT) >> 8) |
+		NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM |
+		NV50_PDISPLAY_CHANNEL_DMA_CB_VALID);
+	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000);
+	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002);
+	if (!nv_wait(0x610200, 0x80000000, 0x00000000)) {
+		NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n");
+		NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200));
+		return -EBUSY;
+	}
+	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
+		(nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)) & ~0x00000003) |
+		 NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
+	nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0);
+	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x01000003 |
+		NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
+	nv_wr32(dev, 0x610300, nv_rd32(dev, 0x610300) & ~1);
+
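+	/* Software state for the EVO push buffer: a 4 KiB ring of 32-bit
+	 * words, with (presumably) two slots kept back for the wrap-around
+	 * jump.
+	 */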
+	evo->dma.max = (4096/4) - 2;
+	evo->dma.put = 0;
+	evo->dma.cur = evo->dma.put;
+	evo->dma.free = evo->dma.max - evo->dma.cur;
+
+	ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+		OUT_RING(evo, 0);
+
+	ret = RING_SPACE(evo, 11);
+	if (ret)
+		return ret;
+	BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
+	OUT_RING(evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
+	OUT_RING(evo, NV50_EVO_DMA_NOTIFY_HANDLE_NONE);
+	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, FB_DMA), 1);
+	OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
+	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK0800), 1);
+	OUT_RING(evo, 0);
+	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, DISPLAY_START), 1);
+	OUT_RING(evo, 0);
+	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
+	OUT_RING(evo, 0);
+	FIRE_RING(evo);
+	if (!nv_wait(0x640004, 0xffffffff, evo->dma.put << 2))
+		NV_ERROR(dev, "evo pushbuf stalled\n");
+
+	/* enable clock change interrupts. */
+	nv_wr32(dev, 0x610028, 0x00010001);
+	nv_wr32(dev, NV50_PDISPLAY_INTR_EN, (NV50_PDISPLAY_INTR_EN_CLK_UNK10 |
+					     NV50_PDISPLAY_INTR_EN_CLK_UNK20 |
+					     NV50_PDISPLAY_INTR_EN_CLK_UNK40));
+
+	/* enable hotplug interrupts */
+	hpd_en[0] = hpd_en[1] = 0;
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct nouveau_connector *conn = nouveau_connector(connector);
+		struct dcb_gpio_entry *gpio;
+
+		if (connector->connector_type != DRM_MODE_CONNECTOR_DVII &&
+		    connector->connector_type != DRM_MODE_CONNECTOR_DVID &&
+		    connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+			continue;
+
+		gpio = nouveau_bios_gpio_entry(dev, conn->dcb->gpio_tag);
+		if (!gpio)
+			continue;
+
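+		/* Each hotplug enable register covers 16 GPIO lines; set
+		 * both the plug (low word) and unplug (high word) enable
+		 * bits for this line. */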
+		hpd_en[gpio->line >> 4] |= (0x00010001 << (gpio->line & 0xf));
+	}
+
+	nv_wr32(dev, 0xe054, 0xffffffff);
+	nv_wr32(dev, 0xe050, hpd_en[0]);
+	if (dev_priv->chipset >= 0x90) {
+		nv_wr32(dev, 0xe074, 0xffffffff);
+		nv_wr32(dev, 0xe070, hpd_en[1]);
+	}
+
+	return 0;
+}
+
+static int nv50_display_disable(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_crtc *drm_crtc;
+	int ret, i;
+
+	NV_DEBUG(dev, "\n");
+
+	list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
+		struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
+
+		nv50_crtc_blank(crtc, true);
+	}
+
+	ret = RING_SPACE(dev_priv->evo, 2);
+	if (ret == 0) {
+		BEGIN_RING(dev_priv->evo, 0, NV50_EVO_UPDATE, 1);
+		OUT_RING(dev_priv->evo, 0);
+	}
+	FIRE_RING(dev_priv->evo);
+
+	/* Almost like ack'ing a vblank interrupt, maybe in the spirit of
+	 * cleaning up?
+	 */
+	list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
+		struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
+		uint32_t mask = NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(crtc->index);
+
+		if (!crtc->base.enabled)
+			continue;
+
+		nv_wr32(dev, NV50_PDISPLAY_INTR_1, mask);
+		if (!nv_wait(NV50_PDISPLAY_INTR_1, mask, mask)) {
+			NV_ERROR(dev, "timeout: (0x610024 & 0x%08x) == "
+				      "0x%08x\n", mask, mask);
+			NV_ERROR(dev, "0x610024 = 0x%08x\n",
+				 nv_rd32(dev, NV50_PDISPLAY_INTR_1));
+		}
+	}
+
+	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0);
+	nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0);
+	if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) {
+		NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n");
+		NV_ERROR(dev, "0x610200 = 0x%08x\n",
+			  nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
+	}
+
+	for (i = 0; i < 3; i++) {
+		if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(i),
+			     NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
+			NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i);
+			NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", i,
+				  nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i)));
+		}
+	}
+
+	/* disable interrupts. */
+	nv_wr32(dev, NV50_PDISPLAY_INTR_EN, 0x00000000);
+
+	/* disable hotplug interrupts */
+	nv_wr32(dev, 0xe054, 0xffffffff);
+	nv_wr32(dev, 0xe050, 0x00000000);
+	if (dev_priv->chipset >= 0x90) {
+		nv_wr32(dev, 0xe074, 0xffffffff);
+		nv_wr32(dev, 0xe070, 0x00000000);
+	}
+	return 0;
+}
+
+int nv50_display_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct parsed_dcb *dcb = dev_priv->vbios->dcb;
+	uint32_t connector[16] = {};
+	int ret, i;
+
+	NV_DEBUG(dev, "\n");
+
+	/* init basic kernel modesetting */
+	drm_mode_config_init(dev);
+
+	/* Initialise some optional connector properties. */
+	drm_mode_create_scaling_mode_property(dev);
+	drm_mode_create_dithering_property(dev);
+
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+
+	dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
+
+	dev->mode_config.max_width = 8192;
+	dev->mode_config.max_height = 8192;
+
+	dev->mode_config.fb_base = dev_priv->fb_phys;
+
+	/* Create EVO channel */
+	ret = nv50_evo_channel_new(dev, &dev_priv->evo);
+	if (ret) {
+		NV_ERROR(dev, "Error creating EVO channel: %d\n", ret);
+		return ret;
+	}
+
+	/* Create CRTC objects */
+	for (i = 0; i < 2; i++)
+		nv50_crtc_create(dev, i);
+
+	/* We set up the encoders from the BIOS table */
+	for (i = 0 ; i < dcb->entries; i++) {
+		struct dcb_entry *entry = &dcb->entry[i];
+
+		if (entry->location != DCB_LOC_ON_CHIP) {
+			NV_WARN(dev, "Off-chip encoder %d/%d unsupported\n",
+				entry->type, ffs(entry->or) - 1);
+			continue;
+		}
+
+		switch (entry->type) {
+		case OUTPUT_TMDS:
+		case OUTPUT_LVDS:
+		case OUTPUT_DP:
+			nv50_sor_create(dev, entry);
+			break;
+		case OUTPUT_ANALOG:
+			nv50_dac_create(dev, entry);
+			break;
+		default:
+			NV_WARN(dev, "DCB encoder %d unknown\n", entry->type);
+			continue;
+		}
+
+		connector[entry->connector] |= (1 << entry->type);
+	}
+
+	/* It appears that DCB 3.0+ VBIOS has a connector table; however,
+	 * I'm not 100% certain how to decode it correctly yet, so just
+	 * look at what encoders are present on each connector index and
+	 * attempt to derive the connector type from that.
+	 */
+	for (i = 0 ; i < dcb->entries; i++) {
+		struct dcb_entry *entry = &dcb->entry[i];
+		uint16_t encoders;
+		int type;
+
+		encoders = connector[entry->connector];
+		if (!(encoders & (1 << entry->type)))
+			continue;
+		connector[entry->connector] = 0;
+
+		if (encoders & (1 << OUTPUT_DP)) {
+			type = DRM_MODE_CONNECTOR_DisplayPort;
+		} else if (encoders & (1 << OUTPUT_TMDS)) {
+			if (encoders & (1 << OUTPUT_ANALOG))
+				type = DRM_MODE_CONNECTOR_DVII;
+			else
+				type = DRM_MODE_CONNECTOR_DVID;
+		} else if (encoders & (1 << OUTPUT_ANALOG)) {
+			type = DRM_MODE_CONNECTOR_VGA;
+		} else if (encoders & (1 << OUTPUT_LVDS)) {
+			type = DRM_MODE_CONNECTOR_LVDS;
+		} else {
+			type = DRM_MODE_CONNECTOR_Unknown;
+		}
+
+		if (type == DRM_MODE_CONNECTOR_Unknown)
+			continue;
+
+		nouveau_connector_create(dev, entry->connector, type);
+	}
+
+	ret = nv50_display_init(dev);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int nv50_display_destroy(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	NV_DEBUG(dev, "\n");
+
+	drm_mode_config_cleanup(dev);
+
+	nv50_display_disable(dev);
+	nv50_evo_channel_del(&dev_priv->evo);
+
+	return 0;
+}
+
+static inline uint32_t
+nv50_display_mode_ctrl(struct drm_device *dev, bool sor, int or)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t mc;
+
+	if (sor) {
+		if (dev_priv->chipset < 0x90 ||
+		    dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0)
+			mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_P(or));
+		else
+			mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_P(or));
+	} else {
+		mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_P(or));
+	}
+
+	return mc;
+}
+
+static int
+nv50_display_irq_head(struct drm_device *dev, int *phead,
+		      struct dcb_entry **pdcbent)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t unk30 = nv_rd32(dev, NV50_PDISPLAY_UNK30_CTRL);
+	int head, i, or = 0, type = OUTPUT_ANY;
+
+	/* We're assuming that head 0 *or* head 1 will be active here,
+	 * and not both.  I'm not sure if the hw will ever even signal
+	 * both, but it definitely shouldn't for us as we commit each
+	 * CRTC separately, and submission will be blocked by the GPU
+	 * until we handle each in turn.
+	 */
+	NV_DEBUG(dev, "0x610030: 0x%08x\n", unk30);
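+	/* Bits 9:10 of 0x610030 appear to indicate which head the event
+	 * relates to. */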
+	head = ffs((unk30 >> 9) & 3) - 1;
+	if (head < 0)
+		return -EINVAL;
+
+	/* This assumes CRTCs are never bound to multiple encoders, which
+	 * should be the case.
+	 */
+	for (i = 0; i < 3 && type == OUTPUT_ANY; i++) {
+		uint32_t mc = nv50_display_mode_ctrl(dev, false, i);
+		if (!(mc & (1 << head)))
+			continue;
+
+		switch ((mc >> 8) & 0xf) {
+		case 0: type = OUTPUT_ANALOG; break;
+		case 1: type = OUTPUT_TV; break;
+		default:
+			NV_ERROR(dev, "unknown dac mode_ctrl: 0x%08x\n", mc);
+			return -1;
+		}
+
+		or = i;
+	}
+
+	for (i = 0; i < 4 && type == OUTPUT_ANY; i++) {
+		uint32_t mc = nv50_display_mode_ctrl(dev, true, i);
+		if (!(mc & (1 << head)))
+			continue;
+
+		switch ((mc >> 8) & 0xf) {
+		case 0: type = OUTPUT_LVDS; break;
+		case 1: type = OUTPUT_TMDS; break;
+		case 2: type = OUTPUT_TMDS; break;
+		case 5: type = OUTPUT_TMDS; break;
+		case 8: type = OUTPUT_DP; break;
+		case 9: type = OUTPUT_DP; break;
+		default:
+			NV_ERROR(dev, "unknown sor mode_ctrl: 0x%08x\n", mc);
+			return -1;
+		}
+
+		or = i;
+	}
+
+	NV_DEBUG(dev, "type %d, or %d\n", type, or);
+	if (type == OUTPUT_ANY) {
+		NV_ERROR(dev, "unknown encoder!!\n");
+		return -1;
+	}
+
+	for (i = 0; i < dev_priv->vbios->dcb->entries; i++) {
+		struct dcb_entry *dcbent = &dev_priv->vbios->dcb->entry[i];
+
+		if (dcbent->type != type)
+			continue;
+
+		if (!(dcbent->or & (1 << or)))
+			continue;
+
+		*phead = head;
+		*pdcbent = dcbent;
+		return 0;
+	}
+
+	NV_ERROR(dev, "no DCB entry for type %d, or %d\n", type, or);
+	return -EINVAL;
+}
+
+static uint32_t
+nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
+			   int pxclk)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->VBIOS;
+	uint32_t mc, script = 0, or;
+
+	or = ffs(dcbent->or) - 1;
+	mc = nv50_display_mode_ctrl(dev, dcbent->type != OUTPUT_ANALOG, or);
+	switch (dcbent->type) {
+	case OUTPUT_LVDS:
+		script = (mc >> 8) & 0xf;
+		if (bios->pub.fp_no_ddc) {
+			if (bios->fp.dual_link)
+				script |= 0x0100;
+			if (bios->fp.if_is_24bit)
+				script |= 0x0200;
+		} else {
+			if (pxclk >= bios->fp.duallink_transition_clk) {
+				script |= 0x0100;
+				if (bios->fp.strapless_is_24bit & 2)
+					script |= 0x0200;
+			} else
+			if (bios->fp.strapless_is_24bit & 1)
+				script |= 0x0200;
+		}
+
+		if (nouveau_uscript_lvds >= 0) {
+			NV_INFO(dev, "override script 0x%04x with 0x%04x "
+				     "for output LVDS-%d\n", script,
+				     nouveau_uscript_lvds, or);
+			script = nouveau_uscript_lvds;
+		}
+		break;
+	case OUTPUT_TMDS:
+		script = (mc >> 8) & 0xf;
+		if (pxclk >= 165000)
+			script |= 0x0100;
+
+		if (nouveau_uscript_tmds >= 0) {
+			NV_INFO(dev, "override script 0x%04x with 0x%04x "
+				     "for output TMDS-%d\n", script,
+				     nouveau_uscript_tmds, or);
+			script = nouveau_uscript_tmds;
+		}
+		break;
+	case OUTPUT_DP:
+		script = (mc >> 8) & 0xf;
+		break;
+	case OUTPUT_ANALOG:
+		script = 0xff;
+		break;
+	default:
+		NV_ERROR(dev, "modeset on unsupported output type!\n");
+		break;
+	}
+
+	return script;
+}
+
+static void
+nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan;
+	struct list_head *entry, *tmp;
+
+	list_for_each_safe(entry, tmp, &dev_priv->vbl_waiting) {
+		chan = list_entry(entry, struct nouveau_channel, nvsw.vbl_wait);
+
+		nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset,
+						chan->nvsw.vblsem_rval);
+		list_del(&chan->nvsw.vbl_wait);
+	}
+}
+
+static void
+nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr)
+{
+	intr &= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
+
+	if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0)
+		nv50_display_vblank_crtc_handler(dev, 0);
+
+	if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1)
+		nv50_display_vblank_crtc_handler(dev, 1);
+
+	nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
+		     NV50_PDISPLAY_INTR_EN) & ~intr);
+	nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr);
+}
+
+static void
+nv50_display_unk10_handler(struct drm_device *dev)
+{
+	struct dcb_entry *dcbent;
+	int head, ret;
+
+	ret = nv50_display_irq_head(dev, &head, &dcbent);
+	if (ret)
+		goto ack;
+
+	nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) & ~8);
+
+	nouveau_bios_run_display_table(dev, dcbent, 0, -1);
+
+ack:
+	nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10);
+	nv_wr32(dev, 0x610030, 0x80000000);
+}
+
+static void
+nv50_display_unk20_handler(struct drm_device *dev)
+{
+	struct dcb_entry *dcbent;
+	uint32_t tmp, pclk, script;
+	int head, or, ret;
+
+	ret = nv50_display_irq_head(dev, &head, &dcbent);
+	if (ret)
+		goto ack;
+	or = ffs(dcbent->or) - 1;
+	pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff;
+	script = nv50_display_script_select(dev, dcbent, pclk);
+
+	NV_DEBUG(dev, "head %d pxclk: %dKHz\n", head, pclk);
+
+	if (dcbent->type != OUTPUT_DP)
+		nouveau_bios_run_display_table(dev, dcbent, 0, -2);
+
+	nv50_crtc_set_clock(dev, head, pclk);
+
+	nouveau_bios_run_display_table(dev, dcbent, script, pclk);
+
+	tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head));
+	tmp &= ~0x0000000f;
+	nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp);
+
+	if (dcbent->type != OUTPUT_ANALOG) {
+		tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
+		tmp &= ~0x00000f0f;
+		if (script & 0x0100)
+			tmp |= 0x00000101;
+		nv_wr32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp);
+	} else {
+		nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
+	}
+
+ack:
+	nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
+	nv_wr32(dev, 0x610030, 0x80000000);
+}
+
+static void
+nv50_display_unk40_handler(struct drm_device *dev)
+{
+	struct dcb_entry *dcbent;
+	int head, pclk, script, ret;
+
+	ret = nv50_display_irq_head(dev, &head, &dcbent);
+	if (ret)
+		goto ack;
+	pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff;
+	script = nv50_display_script_select(dev, dcbent, pclk);
+
+	nouveau_bios_run_display_table(dev, dcbent, script, -pclk);
+
+ack:
+	nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40);
+	nv_wr32(dev, 0x610030, 0x80000000);
+	nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) | 8);
+}
+
+void
+nv50_display_irq_handler_bh(struct work_struct *work)
+{
+	struct drm_nouveau_private *dev_priv =
+		container_of(work, struct drm_nouveau_private, irq_work);
+	struct drm_device *dev = dev_priv->dev;
+
+	for (;;) {
+		uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
+		uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
+
+		NV_DEBUG(dev, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);
+
+		if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10)
+			nv50_display_unk10_handler(dev);
+		else
+		if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK20)
+			nv50_display_unk20_handler(dev);
+		else
+		if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK40)
+			nv50_display_unk40_handler(dev);
+		else
+			break;
+	}
+
+	nv_wr32(dev, NV03_PMC_INTR_EN_0, 1);
+}
+
+static void
+nv50_display_error_handler(struct drm_device *dev)
+{
+	uint32_t addr, data;
+
+	nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000);
+	addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR);
+	data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA);
+
+	NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x (0x%04x 0x%02x)\n",
+		 0, addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
+
+	nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000);
+}
+
+static void
+nv50_display_irq_hotplug(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_connector *connector;
+	const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
+	uint32_t unplug_mask, plug_mask, change_mask;
+	uint32_t hpd0, hpd1 = 0;
+
+	hpd0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
+	if (dev_priv->chipset >= 0x90)
+		hpd1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
+
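+	/* In each status register the low word signals plug events and the
+	 * high word unplug events; fold both registers into 32-bit masks. */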
+	plug_mask   = (hpd0 & 0x0000ffff) | (hpd1 << 16);
+	unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000);
+	change_mask = plug_mask | unplug_mask;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct drm_encoder_helper_funcs *helper;
+		struct nouveau_connector *nv_connector =
+			nouveau_connector(connector);
+		struct nouveau_encoder *nv_encoder;
+		struct dcb_gpio_entry *gpio;
+		uint32_t reg;
+		bool plugged;
+
+		if (!nv_connector->dcb)
+			continue;
+
+		gpio = nouveau_bios_gpio_entry(dev, nv_connector->dcb->gpio_tag);
+		if (!gpio || !(change_mask & (1 << gpio->line)))
+			continue;
+
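+		/* Each GPIO line occupies a 4-bit field in the state
+		 * registers; bit 2 of the field presumably reflects the
+		 * current pin level. */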
+		reg = nv_rd32(dev, gpio_reg[gpio->line >> 3]);
+		plugged = !!(reg & (4 << ((gpio->line & 7) << 2)));
+		NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
+			drm_get_connector_name(connector));
+
+		if (!connector->encoder || !connector->encoder->crtc ||
+		    !connector->encoder->crtc->enabled)
+			continue;
+		nv_encoder = nouveau_encoder(connector->encoder);
+		helper = connector->encoder->helper_private;
+
+		if (nv_encoder->dcb->type != OUTPUT_DP)
+			continue;
+
+		if (plugged)
+			helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
+		else
+			helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
+	}
+
+	nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054));
+	if (dev_priv->chipset >= 0x90)
+		nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074));
+}
+
+void
+nv50_display_irq_handler(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t delayed = 0;
+
+	while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG)
+		nv50_display_irq_hotplug(dev);
+
+	while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
+		uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
+		uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
+		uint32_t clock;
+
+		NV_DEBUG(dev, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);
+
+		if (!intr0 && !(intr1 & ~delayed))
+			break;
+
+		if (intr0 & 0x00010000) {
+			nv50_display_error_handler(dev);
+			intr0 &= ~0x00010000;
+		}
+
+		if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
+			nv50_display_vblank_handler(dev, intr1);
+			intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
+		}
+
+		clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 |
+				  NV50_PDISPLAY_INTR_1_CLK_UNK20 |
+				  NV50_PDISPLAY_INTR_1_CLK_UNK40));
+		if (clock) {
+			nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
+			if (!work_pending(&dev_priv->irq_work))
+				queue_work(dev_priv->wq, &dev_priv->irq_work);
+			delayed |= clock;
+			intr1 &= ~clock;
+		}
+
+		if (intr0) {
+			NV_ERROR(dev, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0);
+			nv_wr32(dev, NV50_PDISPLAY_INTR_0, intr0);
+		}
+
+		if (intr1) {
+			NV_ERROR(dev,
+				 "unknown PDISPLAY_INTR_1: 0x%08x\n", intr1);
+			nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr1);
+		}
+	}
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
new file mode 100644
index 0000000..3ae8d07
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NV50_DISPLAY_H__
+#define __NV50_DISPLAY_H__
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_reg.h"
+#include "nouveau_crtc.h"
+#include "nv50_evo.h"
+
+void nv50_display_irq_handler(struct drm_device *dev);
+void nv50_display_irq_handler_bh(struct work_struct *work);
+int nv50_display_init(struct drm_device *dev);
+int nv50_display_create(struct drm_device *dev);
+int nv50_display_destroy(struct drm_device *dev);
+int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
+int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
+
+#endif /* __NV50_DISPLAY_H__ */
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
new file mode 100644
index 0000000..aae1334
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_evo.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#define NV50_EVO_UPDATE                                              0x00000080
+#define NV50_EVO_UNK84                                               0x00000084
+#define NV50_EVO_UNK84_NOTIFY                                        0x40000000
+#define NV50_EVO_UNK84_NOTIFY_DISABLED                               0x00000000
+#define NV50_EVO_UNK84_NOTIFY_ENABLED                                0x40000000
+#define NV50_EVO_DMA_NOTIFY                                          0x00000088
+#define NV50_EVO_DMA_NOTIFY_HANDLE                                   0xffffffff
+#define NV50_EVO_DMA_NOTIFY_HANDLE_NONE                              0x00000000
+#define NV50_EVO_UNK8C                                               0x0000008C
+
+#define NV50_EVO_DAC(n, r)                       ((n) * 0x80 + NV50_EVO_DAC_##r)
+#define NV50_EVO_DAC_MODE_CTRL                                       0x00000400
+#define NV50_EVO_DAC_MODE_CTRL_CRTC0                                 0x00000001
+#define NV50_EVO_DAC_MODE_CTRL_CRTC1                                 0x00000002
+#define NV50_EVO_DAC_MODE_CTRL2                                      0x00000404
+#define NV50_EVO_DAC_MODE_CTRL2_NHSYNC                               0x00000001
+#define NV50_EVO_DAC_MODE_CTRL2_NVSYNC                               0x00000002
+
+#define NV50_EVO_SOR(n, r)                       ((n) * 0x40 + NV50_EVO_SOR_##r)
+#define NV50_EVO_SOR_MODE_CTRL                                       0x00000600
+#define NV50_EVO_SOR_MODE_CTRL_CRTC0                                 0x00000001
+#define NV50_EVO_SOR_MODE_CTRL_CRTC1                                 0x00000002
+#define NV50_EVO_SOR_MODE_CTRL_TMDS                                  0x00000100
+#define NV50_EVO_SOR_MODE_CTRL_TMDS_DUAL_LINK                        0x00000400
+#define NV50_EVO_SOR_MODE_CTRL_NHSYNC                                0x00001000
+#define NV50_EVO_SOR_MODE_CTRL_NVSYNC                                0x00002000
+
+#define NV50_EVO_CRTC(n, r)                    ((n) * 0x400 + NV50_EVO_CRTC_##r)
+#define NV84_EVO_CRTC(n, r)                    ((n) * 0x400 + NV84_EVO_CRTC_##r)
+#define NV50_EVO_CRTC_UNK0800                                        0x00000800
+#define NV50_EVO_CRTC_CLOCK                                          0x00000804
+#define NV50_EVO_CRTC_INTERLACE                                      0x00000808
+#define NV50_EVO_CRTC_DISPLAY_START                                  0x00000810
+#define NV50_EVO_CRTC_DISPLAY_TOTAL                                  0x00000814
+#define NV50_EVO_CRTC_SYNC_DURATION                                  0x00000818
+#define NV50_EVO_CRTC_SYNC_START_TO_BLANK_END                        0x0000081c
+#define NV50_EVO_CRTC_UNK0820                                        0x00000820
+#define NV50_EVO_CRTC_UNK0824                                        0x00000824
+#define NV50_EVO_CRTC_UNK082C                                        0x0000082c
+#define NV50_EVO_CRTC_CLUT_MODE                                      0x00000840
+/* You can't have a palette in 8-bit mode (=OFF) */
+#define NV50_EVO_CRTC_CLUT_MODE_BLANK                                0x00000000
+#define NV50_EVO_CRTC_CLUT_MODE_OFF                                  0x80000000
+#define NV50_EVO_CRTC_CLUT_MODE_ON                                   0xC0000000
+#define NV50_EVO_CRTC_CLUT_OFFSET                                    0x00000844
+#define NV84_EVO_CRTC_CLUT_DMA                                       0x0000085C
+#define NV84_EVO_CRTC_CLUT_DMA_HANDLE                                0xffffffff
+#define NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE                           0x00000000
+#define NV50_EVO_CRTC_FB_OFFSET                                      0x00000860
+#define NV50_EVO_CRTC_FB_SIZE                                        0x00000868
+#define NV50_EVO_CRTC_FB_CONFIG                                      0x0000086c
+#define NV50_EVO_CRTC_FB_CONFIG_MODE                                 0x00100000
+#define NV50_EVO_CRTC_FB_CONFIG_MODE_TILE                            0x00000000
+#define NV50_EVO_CRTC_FB_CONFIG_MODE_PITCH                           0x00100000
+#define NV50_EVO_CRTC_FB_DEPTH                                       0x00000870
+#define NV50_EVO_CRTC_FB_DEPTH_8                                     0x00001e00
+#define NV50_EVO_CRTC_FB_DEPTH_15                                    0x0000e900
+#define NV50_EVO_CRTC_FB_DEPTH_16                                    0x0000e800
+#define NV50_EVO_CRTC_FB_DEPTH_24                                    0x0000cf00
+#define NV50_EVO_CRTC_FB_DEPTH_30                                    0x0000d100
+#define NV50_EVO_CRTC_FB_DMA                                         0x00000874
+#define NV50_EVO_CRTC_FB_DMA_HANDLE                                  0xffffffff
+#define NV50_EVO_CRTC_FB_DMA_HANDLE_NONE                             0x00000000
+#define NV50_EVO_CRTC_CURSOR_CTRL                                    0x00000880
+#define NV50_EVO_CRTC_CURSOR_CTRL_HIDE                               0x05000000
+#define NV50_EVO_CRTC_CURSOR_CTRL_SHOW                               0x85000000
+#define NV50_EVO_CRTC_CURSOR_OFFSET                                  0x00000884
+#define NV84_EVO_CRTC_CURSOR_DMA                                     0x0000089c
+#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE                              0xffffffff
+#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE                         0x00000000
+#define NV50_EVO_CRTC_DITHER_CTRL                                    0x000008a0
+#define NV50_EVO_CRTC_DITHER_CTRL_OFF                                0x00000000
+#define NV50_EVO_CRTC_DITHER_CTRL_ON                                 0x00000011
+#define NV50_EVO_CRTC_SCALE_CTRL                                     0x000008a4
+#define NV50_EVO_CRTC_SCALE_CTRL_INACTIVE                            0x00000000
+#define NV50_EVO_CRTC_SCALE_CTRL_ACTIVE                              0x00000009
+#define NV50_EVO_CRTC_COLOR_CTRL                                     0x000008a8
+#define NV50_EVO_CRTC_COLOR_CTRL_COLOR                               0x00040000
+#define NV50_EVO_CRTC_FB_POS                                         0x000008c0
+#define NV50_EVO_CRTC_REAL_RES                                       0x000008c8
+#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET                            0x000008d4
+#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(x, y) \
+	((((unsigned)y << 16) & 0xFFFF0000) | (((unsigned)x) & 0x0000FFFF))
+/* Both of these are needed; otherwise nothing happens. */
+#define NV50_EVO_CRTC_SCALE_RES1                                     0x000008d8
+#define NV50_EVO_CRTC_SCALE_RES2                                     0x000008dc
+
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
new file mode 100644
index 0000000..6bcc6d3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -0,0 +1,273 @@
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_fbcon.h"
+
+static void
+nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+	struct nouveau_fbcon_par *par = info->par;
+	struct drm_device *dev = par->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->channel;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return;
+
+	if (!(info->flags & FBINFO_HWACCEL_DISABLED) &&
+	     RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) {
+		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+
+		info->flags |= FBINFO_HWACCEL_DISABLED;
+	}
+
+	if (info->flags & FBINFO_HWACCEL_DISABLED) {
+		cfb_fillrect(info, rect);
+		return;
+	}
+
+	if (rect->rop != ROP_COPY) {
+		BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
+		OUT_RING(chan, 1);
+	}
+	BEGIN_RING(chan, NvSub2D, 0x0588, 1);
+	OUT_RING(chan, rect->color);
+	BEGIN_RING(chan, NvSub2D, 0x0600, 4);
+	OUT_RING(chan, rect->dx);
+	OUT_RING(chan, rect->dy);
+	OUT_RING(chan, rect->dx + rect->width);
+	OUT_RING(chan, rect->dy + rect->height);
+	if (rect->rop != ROP_COPY) {
+		BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
+		OUT_RING(chan, 3);
+	}
+	FIRE_RING(chan);
+}
+
+static void
+nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
+{
+	struct nouveau_fbcon_par *par = info->par;
+	struct drm_device *dev = par->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->channel;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return;
+
+	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) {
+		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+
+		info->flags |= FBINFO_HWACCEL_DISABLED;
+	}
+
+	if (info->flags & FBINFO_HWACCEL_DISABLED) {
+		cfb_copyarea(info, region);
+		return;
+	}
+
+	BEGIN_RING(chan, NvSub2D, 0x0110, 1);
+	OUT_RING(chan, 0);
+	BEGIN_RING(chan, NvSub2D, 0x08b0, 4);
+	OUT_RING(chan, region->dx);
+	OUT_RING(chan, region->dy);
+	OUT_RING(chan, region->width);
+	OUT_RING(chan, region->height);
+	BEGIN_RING(chan, NvSub2D, 0x08d0, 4);
+	OUT_RING(chan, 0);
+	OUT_RING(chan, region->sx);
+	OUT_RING(chan, 0);
+	OUT_RING(chan, region->sy);
+	FIRE_RING(chan);
+}
+
+static void
+nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+	struct nouveau_fbcon_par *par = info->par;
+	struct drm_device *dev = par->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->channel;
+	uint32_t width, dwords, *data = (uint32_t *)image->data;
+	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
+	uint32_t *palette = info->pseudo_palette;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return;
+
+	if (image->depth != 1) {
+		cfb_imageblit(info, image);
+		return;
+	}
+
+	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) {
+		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+		info->flags |= FBINFO_HWACCEL_DISABLED;
+	}
+
+	if (info->flags & FBINFO_HWACCEL_DISABLED) {
+		cfb_imageblit(info, image);
+		return;
+	}
+
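+	/* The 1bpp source bitmap is presumably fed to the 2D engine 32 bits
+	 * at a time, so round the width up to a multiple of 32 pixels. */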
+	width = (image->width + 31) & ~31;
+	dwords = (width * image->height) >> 5;
+
+	BEGIN_RING(chan, NvSub2D, 0x0814, 2);
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+		OUT_RING(chan, palette[image->bg_color] | mask);
+		OUT_RING(chan, palette[image->fg_color] | mask);
+	} else {
+		OUT_RING(chan, image->bg_color);
+		OUT_RING(chan, image->fg_color);
+	}
+	BEGIN_RING(chan, NvSub2D, 0x0838, 2);
+	OUT_RING(chan, image->width);
+	OUT_RING(chan, image->height);
+	BEGIN_RING(chan, NvSub2D, 0x0850, 4);
+	OUT_RING(chan, 0);
+	OUT_RING(chan, image->dx);
+	OUT_RING(chan, 0);
+	OUT_RING(chan, image->dy);
+
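+	/* A method header seems limited to 2047 data words here, so push
+	 * the bitmap in chunks. */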
+	while (dwords) {
+		int push = dwords > 2047 ? 2047 : dwords;
+
+		if (RING_SPACE(chan, push + 1)) {
+			NV_ERROR(dev,
+				 "GPU lockup - switching to software fbcon\n");
+			info->flags |= FBINFO_HWACCEL_DISABLED;
+			cfb_imageblit(info, image);
+			return;
+		}
+
+		dwords -= push;
+
+		BEGIN_RING(chan, NvSub2D, 0x40000860, push);
+		OUT_RINGp(chan, data, push);
+		data += push;
+	}
+
+	FIRE_RING(chan);
+}
+
+int
+nv50_fbcon_accel_init(struct fb_info *info)
+{
+	struct nouveau_fbcon_par *par = info->par;
+	struct drm_device *dev = par->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_gpuobj *eng2d = NULL;
+	int ret, format;
+
+	switch (info->var.bits_per_pixel) {
+	case 8:
+		format = 0xf3;
+		break;
+	case 15:
+		format = 0xf8;
+		break;
+	case 16:
+		format = 0xe8;
+		break;
+	case 32:
+		switch (info->var.transp.length) {
+		case 0: /* depth 24 */
+		case 8: /* depth 32, just use 24.. */
+			format = 0xe6;
+			break;
+		case 2: /* depth 30 */
+			format = 0xd1;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = nouveau_gpuobj_gr_new(dev_priv->channel, 0x502d, &eng2d);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, Nv2D, eng2d, NULL);
+	if (ret)
+		return ret;
+
+	ret = RING_SPACE(chan, 59);
+	if (ret) {
+		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+		return ret;
+	}
+
+	BEGIN_RING(chan, NvSub2D, 0x0000, 1);
+	OUT_RING(chan, Nv2D);
+	BEGIN_RING(chan, NvSub2D, 0x0180, 4);
+	OUT_RING(chan, NvNotify0);
+	OUT_RING(chan, chan->vram_handle);
+	OUT_RING(chan, chan->vram_handle);
+	OUT_RING(chan, chan->vram_handle);
+	BEGIN_RING(chan, NvSub2D, 0x0290, 1);
+	OUT_RING(chan, 0);
+	BEGIN_RING(chan, NvSub2D, 0x0888, 1);
+	OUT_RING(chan, 1);
+	BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
+	OUT_RING(chan, 3);
+	BEGIN_RING(chan, NvSub2D, 0x02a0, 1);
+	OUT_RING(chan, 0x55);
+	BEGIN_RING(chan, NvSub2D, 0x08c0, 4);
+	OUT_RING(chan, 0);
+	OUT_RING(chan, 1);
+	OUT_RING(chan, 0);
+	OUT_RING(chan, 1);
+	BEGIN_RING(chan, NvSub2D, 0x0580, 2);
+	OUT_RING(chan, 4);
+	OUT_RING(chan, format);
+	BEGIN_RING(chan, NvSub2D, 0x02e8, 2);
+	OUT_RING(chan, 2);
+	OUT_RING(chan, 1);
+	BEGIN_RING(chan, NvSub2D, 0x0804, 1);
+	OUT_RING(chan, format);
+	BEGIN_RING(chan, NvSub2D, 0x0800, 1);
+	OUT_RING(chan, 1);
+	BEGIN_RING(chan, NvSub2D, 0x0808, 3);
+	OUT_RING(chan, 0);
+	OUT_RING(chan, 0);
+	OUT_RING(chan, 0);
+	BEGIN_RING(chan, NvSub2D, 0x081c, 1);
+	OUT_RING(chan, 1);
+	BEGIN_RING(chan, NvSub2D, 0x0840, 4);
+	OUT_RING(chan, 0);
+	OUT_RING(chan, 1);
+	OUT_RING(chan, 0);
+	OUT_RING(chan, 1);
+	BEGIN_RING(chan, NvSub2D, 0x0200, 2);
+	OUT_RING(chan, format);
+	OUT_RING(chan, 1);
+	BEGIN_RING(chan, NvSub2D, 0x0214, 5);
+	OUT_RING(chan, info->fix.line_length);
+	OUT_RING(chan, info->var.xres_virtual);
+	OUT_RING(chan, info->var.yres_virtual);
+	OUT_RING(chan, 0);
+	OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
+			 dev_priv->vm_vram_base);
+	BEGIN_RING(chan, NvSub2D, 0x0230, 2);
+	OUT_RING(chan, format);
+	OUT_RING(chan, 1);
+	BEGIN_RING(chan, NvSub2D, 0x0244, 5);
+	OUT_RING(chan, info->fix.line_length);
+	OUT_RING(chan, info->var.xres_virtual);
+	OUT_RING(chan, info->var.yres_virtual);
+	OUT_RING(chan, 0);
+	OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
+			 dev_priv->vm_vram_base);
+
+	info->fbops->fb_fillrect = nv50_fbcon_fillrect;
+	info->fbops->fb_copyarea = nv50_fbcon_copyarea;
+	info->fbops->fb_imageblit = nv50_fbcon_imageblit;
+	return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
new file mode 100644
index 0000000..77ae1aa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -0,0 +1,494 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+struct nv50_fifo_priv {
+	struct nouveau_gpuobj_ref *thingo[2];
+	int cur_thingo;
+};
+
+#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
+
+static void
+nv50_fifo_init_thingo(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv;
+	struct nouveau_gpuobj_ref *cur;
+	int i, nr;
+
+	NV_DEBUG(dev, "\n");
+
+	cur = priv->thingo[priv->cur_thingo];
+	priv->cur_thingo = !priv->cur_thingo;
+
+	/* We never schedule channel 0 or 127 */
+	dev_priv->engine.instmem.prepare_access(dev, true);
+	for (i = 1, nr = 0; i < 127; i++) {
+		if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc)
+			nv_wo32(dev, cur->gpuobj, nr++, i);
+	}
+	dev_priv->engine.instmem.finish_access(dev);
+
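+	/* Point the hardware at the new list and (presumably) trigger a
+	 * reschedule. */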
+	nv_wr32(dev, 0x32f4, cur->instance >> 12);
+	nv_wr32(dev, 0x32ec, nr);
+	nv_wr32(dev, 0x2500, 0x101);
+}
+
+static int
+nv50_fifo_channel_enable(struct drm_device *dev, int channel, bool nt)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->fifos[channel];
+	uint32_t inst;
+
+	NV_DEBUG(dev, "ch%d\n", channel);
+
+	if (!chan->ramfc)
+		return -EINVAL;
+
+	if (IS_G80)
+		inst = chan->ramfc->instance >> 12;
+	else
+		inst = chan->ramfc->instance >> 8;
+	nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel),
+		 inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
+
+	if (!nt)
+		nv50_fifo_init_thingo(dev);
+	return 0;
+}
+
+static void
+nv50_fifo_channel_disable(struct drm_device *dev, int channel, bool nt)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t inst;
+
+	NV_DEBUG(dev, "ch%d, nt=%d\n", channel, nt);
+
+	if (IS_G80)
+		inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80;
+	else
+		inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84;
+	nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst);
+
+	if (!nt)
+		nv50_fifo_init_thingo(dev);
+}
+
+static void
+nv50_fifo_init_reset(struct drm_device *dev)
+{
+	uint32_t pmc_e = NV_PMC_ENABLE_PFIFO;
+
+	NV_DEBUG(dev, "\n");
+
+	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
+	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |  pmc_e);
+}
+
+static void
+nv50_fifo_init_intr(struct drm_device *dev)
+{
+	NV_DEBUG(dev, "\n");
+
+	nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
+	nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
+}
+
+static void
+nv50_fifo_init_context_table(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int i;
+
+	NV_DEBUG(dev, "\n");
+
+	for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
+		if (dev_priv->fifos[i])
+			nv50_fifo_channel_enable(dev, i, true);
+		else
+			nv50_fifo_channel_disable(dev, i, true);
+	}
+
+	nv50_fifo_init_thingo(dev);
+}
+
+static void
+nv50_fifo_init_regs__nv(struct drm_device *dev)
+{
+	NV_DEBUG(dev, "\n");
+
+	nv_wr32(dev, 0x250c, 0x6f3cfc34);
+}
+
+static void
+nv50_fifo_init_regs(struct drm_device *dev)
+{
+	NV_DEBUG(dev, "\n");
+
+	nv_wr32(dev, 0x2500, 0);
+	nv_wr32(dev, 0x3250, 0);
+	nv_wr32(dev, 0x3220, 0);
+	nv_wr32(dev, 0x3204, 0);
+	nv_wr32(dev, 0x3210, 0);
+	nv_wr32(dev, 0x3270, 0);
+
+	/* Enable dummy channels set up by nv50_instmem.c */
+	nv50_fifo_channel_enable(dev, 0, true);
+	nv50_fifo_channel_enable(dev, 127, true);
+}
+
+int
+nv50_fifo_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_fifo_priv *priv;
+	int ret;
+
+	NV_DEBUG(dev, "\n");
+
+	priv = dev_priv->engine.fifo.priv;
+	if (priv) {
+		priv->cur_thingo = !priv->cur_thingo;
+		goto just_reset;
+	}
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	dev_priv->engine.fifo.priv = priv;
+
+	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
+				     NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[0]);
+	if (ret) {
+		NV_ERROR(dev, "error creating thingo0: %d\n", ret);
+		return ret;
+	}
+
+	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
+				     NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[1]);
+	if (ret) {
+		NV_ERROR(dev, "error creating thingo1: %d\n", ret);
+		return ret;
+	}
+
+just_reset:
+	nv50_fifo_init_reset(dev);
+	nv50_fifo_init_intr(dev);
+	nv50_fifo_init_context_table(dev);
+	nv50_fifo_init_regs__nv(dev);
+	nv50_fifo_init_regs(dev);
+	dev_priv->engine.fifo.enable(dev);
+	dev_priv->engine.fifo.reassign(dev, true);
+
+	return 0;
+}
+
+void
+nv50_fifo_takedown(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv;
+
+	NV_DEBUG(dev, "\n");
+
+	if (!priv)
+		return;
+
+	nouveau_gpuobj_ref_del(dev, &priv->thingo[0]);
+	nouveau_gpuobj_ref_del(dev, &priv->thingo[1]);
+
+	dev_priv->engine.fifo.priv = NULL;
+	kfree(priv);
+}
+
+int
+nv50_fifo_channel_id(struct drm_device *dev)
+{
+	return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
+			NV50_PFIFO_CACHE1_PUSH1_CHID_MASK;
+}
+
+int
+nv50_fifo_create_context(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *ramfc = NULL;
+	int ret;
+
+	NV_DEBUG(dev, "ch%d\n", chan->id);
+
+	if (IS_G80) {
+		uint32_t ramin_poffset = chan->ramin->gpuobj->im_pramin->start;
+		uint32_t ramin_voffset = chan->ramin->gpuobj->im_backing_start;
+
+		ret = nouveau_gpuobj_new_fake(dev, ramin_poffset, ramin_voffset,
+					      0x100, NVOBJ_FLAG_ZERO_ALLOC |
+					      NVOBJ_FLAG_ZERO_FREE, &ramfc,
+					      &chan->ramfc);
+		if (ret)
+			return ret;
+
+		ret = nouveau_gpuobj_new_fake(dev, ramin_poffset + 0x0400,
+					      ramin_voffset + 0x0400, 4096,
+					      0, NULL, &chan->cache);
+		if (ret)
+			return ret;
+	} else {
+		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256,
+					     NVOBJ_FLAG_ZERO_ALLOC |
+					     NVOBJ_FLAG_ZERO_FREE,
+					     &chan->ramfc);
+		if (ret)
+			return ret;
+		ramfc = chan->ramfc->gpuobj;
+
+		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 256,
+					     0, &chan->cache);
+		if (ret)
+			return ret;
+	}
+
+	dev_priv->engine.instmem.prepare_access(dev, true);
+
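+	/* Initial RAMFC state; the unexplained values are presumably
+	 * magic numbers taken from the binary driver. */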
+	nv_wo32(dev, ramfc, 0x08/4, chan->pushbuf_base);
+	nv_wo32(dev, ramfc, 0x10/4, chan->pushbuf_base);
+	nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4);
+	nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4));
+	nv_wo32(dev, ramfc, 0x3c/4, 0x00086078);
+	nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff);
+	nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff);
+	nv_wo32(dev, ramfc, 0x40/4, 0x00000000);
+	nv_wo32(dev, ramfc, 0x7c/4, 0x30000001);
+	nv_wo32(dev, ramfc, 0x78/4, 0x00000000);
+	nv_wo32(dev, ramfc, 0x4c/4, 0xffffffff);
+
+	if (!IS_G80) {
+		nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id);
+		nv_wo32(dev, chan->ramin->gpuobj, 1,
+						chan->ramfc->instance >> 8);
+
+		nv_wo32(dev, ramfc, 0x88/4, chan->cache->instance >> 10);
+		nv_wo32(dev, ramfc, 0x98/4, chan->ramin->instance >> 12);
+	}
+
+	dev_priv->engine.instmem.finish_access(dev);
+
+	ret = nv50_fifo_channel_enable(dev, chan->id, false);
+	if (ret) {
+		NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret);
+		nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+		return ret;
+	}
+
+	return 0;
+}
+
+void
+nv50_fifo_destroy_context(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+
+	NV_DEBUG(dev, "ch%d\n", chan->id);
+
+	nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+	nouveau_gpuobj_ref_del(dev, &chan->cache);
+
+	nv50_fifo_channel_disable(dev, chan->id, false);
+
+	/* Channel 0 is the dummy channel; its setup is also used for ch 127 */
+	if (chan->id == 0)
+		nv50_fifo_channel_disable(dev, 127, false);
+}
+
+int
+nv50_fifo_load_context(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj;
+	struct nouveau_gpuobj *cache = chan->cache->gpuobj;
+	int ptr, cnt;
+
+	NV_DEBUG(dev, "ch%d\n", chan->id);
+
+	dev_priv->engine.instmem.prepare_access(dev, false);
+
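+	/* Restore the PFIFO context registers from the channel's RAMFC. */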
+	nv_wr32(dev, 0x3330, nv_ro32(dev, ramfc, 0x00/4));
+	nv_wr32(dev, 0x3334, nv_ro32(dev, ramfc, 0x04/4));
+	nv_wr32(dev, 0x3240, nv_ro32(dev, ramfc, 0x08/4));
+	nv_wr32(dev, 0x3320, nv_ro32(dev, ramfc, 0x0c/4));
+	nv_wr32(dev, 0x3244, nv_ro32(dev, ramfc, 0x10/4));
+	nv_wr32(dev, 0x3328, nv_ro32(dev, ramfc, 0x14/4));
+	nv_wr32(dev, 0x3368, nv_ro32(dev, ramfc, 0x18/4));
+	nv_wr32(dev, 0x336c, nv_ro32(dev, ramfc, 0x1c/4));
+	nv_wr32(dev, 0x3370, nv_ro32(dev, ramfc, 0x20/4));
+	nv_wr32(dev, 0x3374, nv_ro32(dev, ramfc, 0x24/4));
+	nv_wr32(dev, 0x3378, nv_ro32(dev, ramfc, 0x28/4));
+	nv_wr32(dev, 0x337c, nv_ro32(dev, ramfc, 0x2c/4));
+	nv_wr32(dev, 0x3228, nv_ro32(dev, ramfc, 0x30/4));
+	nv_wr32(dev, 0x3364, nv_ro32(dev, ramfc, 0x34/4));
+	nv_wr32(dev, 0x32a0, nv_ro32(dev, ramfc, 0x38/4));
+	nv_wr32(dev, 0x3224, nv_ro32(dev, ramfc, 0x3c/4));
+	nv_wr32(dev, 0x324c, nv_ro32(dev, ramfc, 0x40/4));
+	nv_wr32(dev, 0x2044, nv_ro32(dev, ramfc, 0x44/4));
+	nv_wr32(dev, 0x322c, nv_ro32(dev, ramfc, 0x48/4));
+	nv_wr32(dev, 0x3234, nv_ro32(dev, ramfc, 0x4c/4));
+	nv_wr32(dev, 0x3340, nv_ro32(dev, ramfc, 0x50/4));
+	nv_wr32(dev, 0x3344, nv_ro32(dev, ramfc, 0x54/4));
+	nv_wr32(dev, 0x3280, nv_ro32(dev, ramfc, 0x58/4));
+	nv_wr32(dev, 0x3254, nv_ro32(dev, ramfc, 0x5c/4));
+	nv_wr32(dev, 0x3260, nv_ro32(dev, ramfc, 0x60/4));
+	nv_wr32(dev, 0x3264, nv_ro32(dev, ramfc, 0x64/4));
+	nv_wr32(dev, 0x3268, nv_ro32(dev, ramfc, 0x68/4));
+	nv_wr32(dev, 0x326c, nv_ro32(dev, ramfc, 0x6c/4));
+	nv_wr32(dev, 0x32e4, nv_ro32(dev, ramfc, 0x70/4));
+	nv_wr32(dev, 0x3248, nv_ro32(dev, ramfc, 0x74/4));
+	nv_wr32(dev, 0x2088, nv_ro32(dev, ramfc, 0x78/4));
+	nv_wr32(dev, 0x2058, nv_ro32(dev, ramfc, 0x7c/4));
+	nv_wr32(dev, 0x2210, nv_ro32(dev, ramfc, 0x80/4));
+
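+	/* Replay any methods that were saved out of CACHE1 when this
+	 * channel was last unloaded. */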
+	cnt = nv_ro32(dev, ramfc, 0x84/4);
+	for (ptr = 0; ptr < cnt; ptr++) {
+		nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr),
+			nv_ro32(dev, cache, (ptr * 2) + 0));
+		nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
+			nv_ro32(dev, cache, (ptr * 2) + 1));
+	}
+	nv_wr32(dev, 0x3210, cnt << 2);
+	nv_wr32(dev, 0x3270, 0);
+
+	/* guessing that all the 0x34xx regs aren't on NV50 */
+	if (!IS_G80) {
+		nv_wr32(dev, 0x340c, nv_ro32(dev, ramfc, 0x88/4));
+		nv_wr32(dev, 0x3400, nv_ro32(dev, ramfc, 0x8c/4));
+		nv_wr32(dev, 0x3404, nv_ro32(dev, ramfc, 0x90/4));
+		nv_wr32(dev, 0x3408, nv_ro32(dev, ramfc, 0x94/4));
+		nv_wr32(dev, 0x3410, nv_ro32(dev, ramfc, 0x98/4));
+	}
+
+	dev_priv->engine.instmem.finish_access(dev);
+
+	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
+	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
+	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
+	return 0;
+}
+
+int
+nv50_fifo_unload_context(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nouveau_gpuobj *ramfc, *cache;
+	struct nouveau_channel *chan = NULL;
+	int chid, get, put, ptr;
+
+	NV_DEBUG(dev, "\n");
+
+	chid = pfifo->channel_id(dev);
+	if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
+		return 0;
+
+	chan = dev_priv->fifos[chid];
+	if (!chan) {
+		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
+		return -EINVAL;
+	}
+	NV_DEBUG(dev, "ch%d\n", chan->id);
+	ramfc = chan->ramfc->gpuobj;
+	cache = chan->cache->gpuobj;
+
+	dev_priv->engine.instmem.prepare_access(dev, true);
+
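+	/* Save the PFIFO context registers back into the channel's RAMFC. */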
+	nv_wo32(dev, ramfc, 0x00/4, nv_rd32(dev, 0x3330));
+	nv_wo32(dev, ramfc, 0x04/4, nv_rd32(dev, 0x3334));
+	nv_wo32(dev, ramfc, 0x08/4, nv_rd32(dev, 0x3240));
+	nv_wo32(dev, ramfc, 0x0c/4, nv_rd32(dev, 0x3320));
+	nv_wo32(dev, ramfc, 0x10/4, nv_rd32(dev, 0x3244));
+	nv_wo32(dev, ramfc, 0x14/4, nv_rd32(dev, 0x3328));
+	nv_wo32(dev, ramfc, 0x18/4, nv_rd32(dev, 0x3368));
+	nv_wo32(dev, ramfc, 0x1c/4, nv_rd32(dev, 0x336c));
+	nv_wo32(dev, ramfc, 0x20/4, nv_rd32(dev, 0x3370));
+	nv_wo32(dev, ramfc, 0x24/4, nv_rd32(dev, 0x3374));
+	nv_wo32(dev, ramfc, 0x28/4, nv_rd32(dev, 0x3378));
+	nv_wo32(dev, ramfc, 0x2c/4, nv_rd32(dev, 0x337c));
+	nv_wo32(dev, ramfc, 0x30/4, nv_rd32(dev, 0x3228));
+	nv_wo32(dev, ramfc, 0x34/4, nv_rd32(dev, 0x3364));
+	nv_wo32(dev, ramfc, 0x38/4, nv_rd32(dev, 0x32a0));
+	nv_wo32(dev, ramfc, 0x3c/4, nv_rd32(dev, 0x3224));
+	nv_wo32(dev, ramfc, 0x40/4, nv_rd32(dev, 0x324c));
+	nv_wo32(dev, ramfc, 0x44/4, nv_rd32(dev, 0x2044));
+	nv_wo32(dev, ramfc, 0x48/4, nv_rd32(dev, 0x322c));
+	nv_wo32(dev, ramfc, 0x4c/4, nv_rd32(dev, 0x3234));
+	nv_wo32(dev, ramfc, 0x50/4, nv_rd32(dev, 0x3340));
+	nv_wo32(dev, ramfc, 0x54/4, nv_rd32(dev, 0x3344));
+	nv_wo32(dev, ramfc, 0x58/4, nv_rd32(dev, 0x3280));
+	nv_wo32(dev, ramfc, 0x5c/4, nv_rd32(dev, 0x3254));
+	nv_wo32(dev, ramfc, 0x60/4, nv_rd32(dev, 0x3260));
+	nv_wo32(dev, ramfc, 0x64/4, nv_rd32(dev, 0x3264));
+	nv_wo32(dev, ramfc, 0x68/4, nv_rd32(dev, 0x3268));
+	nv_wo32(dev, ramfc, 0x6c/4, nv_rd32(dev, 0x326c));
+	nv_wo32(dev, ramfc, 0x70/4, nv_rd32(dev, 0x32e4));
+	nv_wo32(dev, ramfc, 0x74/4, nv_rd32(dev, 0x3248));
+	nv_wo32(dev, ramfc, 0x78/4, nv_rd32(dev, 0x2088));
+	nv_wo32(dev, ramfc, 0x7c/4, nv_rd32(dev, 0x2058));
+	nv_wo32(dev, ramfc, 0x80/4, nv_rd32(dev, 0x2210));
+
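+	/* Drain the remaining CACHE1 method/data pairs into the channel's
+	 * cache object so they can be replayed at load time. */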
+	put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2;
+	get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2;
+	ptr = 0;
+	while (put != get) {
+		nv_wo32(dev, cache, ptr++,
+			    nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
+		nv_wo32(dev, cache, ptr++,
+			    nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
+		get = (get + 1) & 0x1ff;
+	}
+
+	/* guessing that all the 0x34xx regs aren't on NV50 */
+	if (!IS_G80) {
+		nv_wo32(dev, ramfc, 0x84/4, ptr >> 1);
+		nv_wo32(dev, ramfc, 0x88/4, nv_rd32(dev, 0x340c));
+		nv_wo32(dev, ramfc, 0x8c/4, nv_rd32(dev, 0x3400));
+		nv_wo32(dev, ramfc, 0x90/4, nv_rd32(dev, 0x3404));
+		nv_wo32(dev, ramfc, 0x94/4, nv_rd32(dev, 0x3408));
+		nv_wo32(dev, ramfc, 0x98/4, nv_rd32(dev, 0x3410));
+	}
+
+	dev_priv->engine.instmem.finish_access(dev);
+
+	/*XXX: probably reload ch127 (NULL) state back too */
+	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127);
+	return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
new file mode 100644
index 0000000..177d822
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -0,0 +1,385 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+MODULE_FIRMWARE("nouveau/nv50.ctxprog");
+MODULE_FIRMWARE("nouveau/nv50.ctxvals");
+MODULE_FIRMWARE("nouveau/nv84.ctxprog");
+MODULE_FIRMWARE("nouveau/nv84.ctxvals");
+MODULE_FIRMWARE("nouveau/nv86.ctxprog");
+MODULE_FIRMWARE("nouveau/nv86.ctxvals");
+MODULE_FIRMWARE("nouveau/nv92.ctxprog");
+MODULE_FIRMWARE("nouveau/nv92.ctxvals");
+MODULE_FIRMWARE("nouveau/nv94.ctxprog");
+MODULE_FIRMWARE("nouveau/nv94.ctxvals");
+MODULE_FIRMWARE("nouveau/nv96.ctxprog");
+MODULE_FIRMWARE("nouveau/nv96.ctxvals");
+MODULE_FIRMWARE("nouveau/nv98.ctxprog");
+MODULE_FIRMWARE("nouveau/nv98.ctxvals");
+MODULE_FIRMWARE("nouveau/nva0.ctxprog");
+MODULE_FIRMWARE("nouveau/nva0.ctxvals");
+MODULE_FIRMWARE("nouveau/nva5.ctxprog");
+MODULE_FIRMWARE("nouveau/nva5.ctxvals");
+MODULE_FIRMWARE("nouveau/nva8.ctxprog");
+MODULE_FIRMWARE("nouveau/nva8.ctxvals");
+MODULE_FIRMWARE("nouveau/nvaa.ctxprog");
+MODULE_FIRMWARE("nouveau/nvaa.ctxvals");
+MODULE_FIRMWARE("nouveau/nvac.ctxprog");
+MODULE_FIRMWARE("nouveau/nvac.ctxvals");
+
+#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
+
+static void
+nv50_graph_init_reset(struct drm_device *dev)
+{
+	uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21);
+
+	NV_DEBUG(dev, "\n");
+
+	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
+	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |  pmc_e);
+}
+
+static void
+nv50_graph_init_intr(struct drm_device *dev)
+{
+	NV_DEBUG(dev, "\n");
+
+	nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
+	nv_wr32(dev, 0x400138, 0xffffffff);
+	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
+}
+
+static void
+nv50_graph_init_regs__nv(struct drm_device *dev)
+{
+	NV_DEBUG(dev, "\n");
+
+	nv_wr32(dev, 0x400804, 0xc0000000);
+	nv_wr32(dev, 0x406800, 0xc0000000);
+	nv_wr32(dev, 0x400c04, 0xc0000000);
+	nv_wr32(dev, 0x401804, 0xc0000000);
+	nv_wr32(dev, 0x405018, 0xc0000000);
+	nv_wr32(dev, 0x402000, 0xc0000000);
+
+	nv_wr32(dev, 0x400108, 0xffffffff);
+
+	nv_wr32(dev, 0x400824, 0x00004000);
+	nv_wr32(dev, 0x400500, 0x00010001);
+}
+
+static void
+nv50_graph_init_regs(struct drm_device *dev)
+{
+	NV_DEBUG(dev, "\n");
+
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_3,
+				(1 << 2) /* HW_CONTEXT_SWITCH_ENABLED */);
+	nv_wr32(dev, 0x402ca8, 0x800);
+}
+
+static int
+nv50_graph_init_ctxctl(struct drm_device *dev)
+{
+	NV_DEBUG(dev, "\n");
+
+	nv40_grctx_init(dev);
+
+	nv_wr32(dev, 0x400320, 4);
+	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
+	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
+	return 0;
+}
+
+int
+nv50_graph_init(struct drm_device *dev)
+{
+	int ret;
+
+	NV_DEBUG(dev, "\n");
+
+	nv50_graph_init_reset(dev);
+	nv50_graph_init_regs__nv(dev);
+	nv50_graph_init_regs(dev);
+	nv50_graph_init_intr(dev);
+
+	ret = nv50_graph_init_ctxctl(dev);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+void
+nv50_graph_takedown(struct drm_device *dev)
+{
+	NV_DEBUG(dev, "\n");
+	nv40_grctx_fini(dev);
+}
+
+void
+nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
+{
+	const uint32_t mask = 0x00010001;
+
+	if (enabled)
+		nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
+	else
+		nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
+}
+
+struct nouveau_channel *
+nv50_graph_channel(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t inst;
+	int i;
+
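+	/* Match the instance address of the currently loaded context against
+	 * each channel's RAMIN to find its owner. */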
+	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
+	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
+		return NULL;
+	inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
+
+	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+		struct nouveau_channel *chan = dev_priv->fifos[i];
+
+		if (chan && chan->ramin && chan->ramin->instance == inst)
+			return chan;
+	}
+
+	return NULL;
+}
+
+int
+nv50_graph_create_context(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
+	struct nouveau_gpuobj *ctx;
+	uint32_t grctx_size = 0x70000;
+	int hdr, ret;
+
+	NV_DEBUG(dev, "ch%d\n", chan->id);
+
+	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, grctx_size, 0x1000,
+				     NVOBJ_FLAG_ZERO_ALLOC |
+				     NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
+	if (ret)
+		return ret;
+	ctx = chan->ramin_grctx->gpuobj;
+
+	hdr = IS_G80 ? 0x200 : 0x20;
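+	/* Fill the grctx header in the channel's RAMIN: what appears to be a
+	 * DMA object (flags, limit, base, then padding). */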
+	dev_priv->engine.instmem.prepare_access(dev, true);
+	nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002);
+	nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
+					   grctx_size - 1);
+	nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance);
+	nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0);
+	nv_wo32(dev, ramin, (hdr + 0x10)/4, 0);
+	nv_wo32(dev, ramin, (hdr + 0x14)/4, 0x00010000);
+	dev_priv->engine.instmem.finish_access(dev);
+
+	dev_priv->engine.instmem.prepare_access(dev, true);
+	nv40_grctx_vals_load(dev, ctx);
+	nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12);
+	if ((dev_priv->chipset & 0xf0) == 0xa0)
+		nv_wo32(dev, ctx, 0x00004/4, 0x00000000);
+	else
+		nv_wo32(dev, ctx, 0x0011c/4, 0x00000000);
+	dev_priv->engine.instmem.finish_access(dev);
+
+	return 0;
+}
+
+void
+nv50_graph_destroy_context(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int i, hdr = IS_G80 ? 0x200 : 0x20;
+
+	NV_DEBUG(dev, "ch%d\n", chan->id);
+
+	if (!chan->ramin || !chan->ramin->gpuobj)
+		return;
+
+	dev_priv->engine.instmem.prepare_access(dev, true);
+	for (i = hdr; i < hdr + 24; i += 4)
+		nv_wo32(dev, chan->ramin->gpuobj, i/4, 0);
+	dev_priv->engine.instmem.finish_access(dev);
+
+	nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
+}
+
+static int
+nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
+{
+	uint32_t fifo = nv_rd32(dev, 0x400500);
+
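+	/* Stall FIFO access to PGRAPH, point the context switcher at the new
+	 * instance, then kick the transfer and wait for idle. */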
+	nv_wr32(dev, 0x400500, fifo & ~1);
+	nv_wr32(dev, 0x400784, inst);
+	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
+	nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
+	nv_wr32(dev, 0x400040, 0xffffffff);
+	(void)nv_rd32(dev, 0x400040);
+	nv_wr32(dev, 0x400040, 0x00000000);
+	nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);
+
+	if (nouveau_wait_for_idle(dev))
+		nv_wr32(dev, 0x40032c, inst | (1<<31));
+	nv_wr32(dev, 0x400500, fifo);
+
+	return 0;
+}
+
+int
+nv50_graph_load_context(struct nouveau_channel *chan)
+{
+	uint32_t inst = chan->ramin->instance >> 12;
+
+	NV_DEBUG(chan->dev, "ch%d\n", chan->id);
+	return nv50_graph_do_load_context(chan->dev, inst);
+}
+
+int
+nv50_graph_unload_context(struct drm_device *dev)
+{
+	uint32_t inst, fifo = nv_rd32(dev, 0x400500);
+
+	inst  = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
+	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
+		return 0;
+	inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
+
+	nv_wr32(dev, 0x400500, fifo & ~1);
+	nv_wr32(dev, 0x400784, inst);
+	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
+	nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
+	nouveau_wait_for_idle(dev);
+	nv_wr32(dev, 0x400500, fifo);
+
+	nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
+	return 0;
+}
+
+void
+nv50_graph_context_switch(struct drm_device *dev)
+{
+	uint32_t inst;
+
+	nv50_graph_unload_context(dev);
+
+	inst  = nv_rd32(dev, NV50_PGRAPH_CTXCTL_NEXT);
+	inst &= NV50_PGRAPH_CTXCTL_NEXT_INSTANCE;
+	nv50_graph_do_load_context(dev, inst);
+
+	nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
+		NV40_PGRAPH_INTR_EN) | NV_PGRAPH_INTR_CONTEXT_SWITCH);
+}
+
+static int
+nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass,
+			   int mthd, uint32_t data)
+{
+	struct nouveau_gpuobj_ref *ref = NULL;
+
+	if (nouveau_gpuobj_ref_find(chan, data, &ref))
+		return -ENOENT;
+
+	if (nouveau_notifier_offset(ref->gpuobj, NULL))
+		return -EINVAL;
+
+	chan->nvsw.vblsem = ref->gpuobj;
+	chan->nvsw.vblsem_offset = ~0;
+	return 0;
+}
+
+static int
+nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass,
+			      int mthd, uint32_t data)
+{
+	if (nouveau_notifier_offset(chan->nvsw.vblsem, &data))
+		return -ERANGE;
+
+	chan->nvsw.vblsem_offset = data >> 2;
+	return 0;
+}
+
+static int
+nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan, int grclass,
+				   int mthd, uint32_t data)
+{
+	chan->nvsw.vblsem_rval = data;
+	return 0;
+}
+
+static int
+nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass,
+			       int mthd, uint32_t data)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
+		return -EINVAL;
+
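+	/* Enable the vblank interrupt for the requested CRTC if it isn't
+	 * already, then queue this channel for a semaphore release. */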
+	if (!(nv_rd32(dev, NV50_PDISPLAY_INTR_EN) &
+		      NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data))) {
+		nv_wr32(dev, NV50_PDISPLAY_INTR_1,
+			NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(data));
+		nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
+			NV50_PDISPLAY_INTR_EN) |
+			NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data));
+	}
+
+	list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);
+	return 0;
+}
+
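+/* NVSW (software object) methods; userspace uses these to arm
+ * vblank-synchronised semaphore releases. */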
+static struct nouveau_pgraph_object_method nv50_graph_nvsw_methods[] = {
+	{ 0x018c, nv50_graph_nvsw_dma_vblsem },
+	{ 0x0400, nv50_graph_nvsw_vblsem_offset },
+	{ 0x0404, nv50_graph_nvsw_vblsem_release_val },
+	{ 0x0408, nv50_graph_nvsw_vblsem_release },
+	{}
+};
+
+struct nouveau_pgraph_object_class nv50_graph_grclass[] = {
+	{ 0x506e, true, nv50_graph_nvsw_methods }, /* nvsw */
+	{ 0x0030, false, NULL }, /* null */
+	{ 0x5039, false, NULL }, /* m2mf */
+	{ 0x502d, false, NULL }, /* 2d */
+	{ 0x50c0, false, NULL }, /* compute */
+	{ 0x5097, false, NULL }, /* tesla (nv50) */
+	{ 0x8297, false, NULL }, /* tesla (nv80/nv90) */
+	{ 0x8397, false, NULL }, /* tesla (nva0) */
+	{ 0x8597, false, NULL }, /* tesla (nva8) */
+	{}
+};
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
new file mode 100644
index 0000000..94400f7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -0,0 +1,509 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+struct nv50_instmem_priv {
+	uint32_t save1700[5]; /* 0x1700->0x1710 */
+
+	struct nouveau_gpuobj_ref *pramin_pt;
+	struct nouveau_gpuobj_ref *pramin_bar;
+	struct nouveau_gpuobj_ref *fb_bar;
+
+	bool last_access_wr;
+};
+
+#define NV50_INSTMEM_PAGE_SHIFT 12
+#define NV50_INSTMEM_PAGE_SIZE  (1 << NV50_INSTMEM_PAGE_SHIFT)
+#define NV50_INSTMEM_PT_SIZE(a)	(((a) >> 12) << 3)
+
+/* NOTE: assumes the 0x1700 (BAR0 PRAMIN) window already covers the correct
+ * MiB of PRAMIN.
+ */
+#define BAR0_WI32(g, o, v) do {                                   \
+	uint32_t offset;                                          \
+	if ((g)->im_backing) {                                    \
+		offset = (g)->im_backing_start;                   \
+	} else {                                                  \
+		offset  = chan->ramin->gpuobj->im_backing_start;  \
+		offset += (g)->im_pramin->start;                  \
+	}                                                         \
+	offset += (o);                                            \
+	nv_wr32(dev, NV_RAMIN + (offset & 0xfffff), (v));        \
+} while (0)
+
+int
+nv50_instmem_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan;
+	uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size;
+	struct nv50_instmem_priv *priv;
+	int ret, i;
+	uint32_t v, save_nv001700;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	dev_priv->engine.instmem.priv = priv;
+
+	/* Save state, will restore at takedown. */
+	for (i = 0x1700; i <= 0x1710; i += 4)
+		priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
+
+	/* Reserve the last MiB of VRAM; at some point we should probably try
+	 * to avoid setting up the tables below on top of the VBIOS image.
+	 */
+	dev_priv->ramin_rsvd_vram = 1 << 20;
+	c_offset = nouveau_mem_fb_amount(dev) - dev_priv->ramin_rsvd_vram;
+	c_size   = 128 << 10;
+	c_vmpd   = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200;
+	c_ramfc  = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20;
+	c_base   = c_vmpd + 0x4000;
+	pt_size  = NV50_INSTMEM_PT_SIZE(dev_priv->ramin_size);
+
+	NV_DEBUG(dev, " Rsvd VRAM base: 0x%08x\n", c_offset);
+	NV_DEBUG(dev, "    VBIOS image: 0x%08x\n",
+				(nv_rd32(dev, 0x619f04) & ~0xff) << 8);
+	NV_DEBUG(dev, "  Aperture size: %d MiB\n", dev_priv->ramin_size >> 20);
+	NV_DEBUG(dev, "        PT size: %d KiB\n", pt_size >> 10);
+
+	/* Determine VM layout, we need to do this first to make sure
+	 * we allocate enough memory for all the page tables.
+	 */
+	dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK);
+	dev_priv->vm_gart_size = NV50_VM_BLOCK;
+
+	dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
+	dev_priv->vm_vram_size = nouveau_mem_fb_amount(dev);
+	if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
+		dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
+	dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
+	dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK;
+
+	dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size;
+
+	NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n",
+		 dev_priv->vm_gart_base,
+		 dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1);
+	NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
+		 dev_priv->vm_vram_base,
+		 dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1);
+
+	c_size += dev_priv->vm_vram_pt_nr * (NV50_VM_BLOCK / 65536 * 8);
+
+	/* Map BAR0 PRAMIN aperture over the memory we want to use */
+	save_nv001700 = nv_rd32(dev, NV50_PUNK_BAR0_PRAMIN);
+	nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (c_offset >> 16));
+
+	/* Create a fake channel, and use it as our "dummy" channels 0/127.
+	 * The main reason for creating a channel is so we can use the gpuobj
+	 * code.  However, it's probably worth noting that NVIDIA also sets up
+	 * its channels 0/127 with the same values we configure here, so there
+	 * may be some other reason for doing this.
+	 *
+	 * We have to create the entire channel manually, as the real channel
+	 * creation code assumes we have PRAMIN access, and we don't until
+	 * we're done here.
+	 */
+	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+	if (!chan)
+		return -ENOMEM;
+	chan->id = 0;
+	chan->dev = dev;
+	chan->file_priv = (struct drm_file *)-2;
+	dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
+
+	/* Channel's PRAMIN object + heap */
+	ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0,
+							NULL, &chan->ramin);
+	if (ret)
+		return ret;
+
+	if (nouveau_mem_init_heap(&chan->ramin_heap, c_base, c_size - c_base))
+		return -ENOMEM;
+
+	/* RAMFC + zero channel's PRAMIN up to start of VM pagedir */
+	ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc,
+						0x4000, 0, NULL, &chan->ramfc);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < c_vmpd; i += 4)
+		BAR0_WI32(chan->ramin->gpuobj, i, 0);
+
+	/* VM page directory */
+	ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd,
+					   0x4000, 0, &chan->vm_pd, NULL);
+	if (ret)
+		return ret;
+	for (i = 0; i < 0x4000; i += 8) {
+		BAR0_WI32(chan->vm_pd, i + 0x00, 0x00000000);
+		BAR0_WI32(chan->vm_pd, i + 0x04, 0x00000000);
+	}
+
+	/* PRAMIN page table; cheat and map it into the VM at 0x0000000000.
+	 * We map the entire fake channel into the start of the PRAMIN BAR.
+	 */
+	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
+							0, &priv->pramin_pt);
+	if (ret)
+		return ret;
+
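+	/* PTEs covering the fake channel are marked present (bit 0 set); the
+	 * rest of the aperture gets the same "invalid" pattern that unbind
+	 * writes. */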
+	for (i = 0, v = c_offset; i < pt_size; i += 8, v += 0x1000) {
+		if (v < (c_offset + c_size))
+			BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1);
+		else
+			BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009);
+		BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+	}
+
+	BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
+	BAR0_WI32(chan->vm_pd, 0x04, 0x00000000);
+
+	/* VRAM page table(s), mapped into VM at +1GiB  */
+	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
+		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0,
+					     NV50_VM_BLOCK/65536*8, 0, 0,
+					     &chan->vm_vram_pt[i]);
+		if (ret) {
+			NV_ERROR(dev, "Error creating VRAM page tables: %d\n",
+									ret);
+			dev_priv->vm_vram_pt_nr = i;
+			return ret;
+		}
+		dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i]->gpuobj;
+
+		for (v = 0; v < dev_priv->vm_vram_pt[i]->im_pramin->size;
+								v += 4)
+			BAR0_WI32(dev_priv->vm_vram_pt[i], v, 0);
+
+		BAR0_WI32(chan->vm_pd, 0x10 + (i*8),
+			  chan->vm_vram_pt[i]->instance | 0x61);
+		BAR0_WI32(chan->vm_pd, 0x14 + (i*8), 0);
+	}
+
+	/* DMA object for PRAMIN BAR */
+	ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
+							&priv->pramin_bar);
+	if (ret)
+		return ret;
+	BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000);
+	BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin_size - 1);
+	BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000);
+	BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000);
+	BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000);
+	BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000);
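+	/* Base 0 above lands on the PRAMIN page-table mapping; the FB BAR
+	 * below starts at +1GiB, where the VRAM page tables were mapped. */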
+
+	/* DMA object for FB BAR */
+	ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
+							&priv->fb_bar);
+	if (ret)
+		return ret;
+	BAR0_WI32(priv->fb_bar->gpuobj, 0x00, 0x7fc00000);
+	BAR0_WI32(priv->fb_bar->gpuobj, 0x04, 0x40000000 +
+					      drm_get_resource_len(dev, 1) - 1);
+	BAR0_WI32(priv->fb_bar->gpuobj, 0x08, 0x40000000);
+	BAR0_WI32(priv->fb_bar->gpuobj, 0x0c, 0x00000000);
+	BAR0_WI32(priv->fb_bar->gpuobj, 0x10, 0x00000000);
+	BAR0_WI32(priv->fb_bar->gpuobj, 0x14, 0x00000000);
+
+	/* Poke the relevant regs, and pray it works :) */
+	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
+	nv_wr32(dev, NV50_PUNK_UNK1710, 0);
+	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
+					 NV50_PUNK_BAR_CFG_BASE_VALID);
+	nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) |
+					NV50_PUNK_BAR1_CTXDMA_VALID);
+	nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
+					NV50_PUNK_BAR3_CTXDMA_VALID);
+
+	for (i = 0; i < 8; i++)
+		nv_wr32(dev, 0x1900 + (i*4), 0);
+
+	/* Assuming that praying isn't enough, check that we can re-read the
+	 * entire fake channel back from the PRAMIN BAR */
+	dev_priv->engine.instmem.prepare_access(dev, false);
+	for (i = 0; i < c_size; i += 4) {
+		if (nv_rd32(dev, NV_RAMIN + i) != nv_ri32(dev, i)) {
+			NV_ERROR(dev, "Error reading back PRAMIN at 0x%08x\n",
+									i);
+			dev_priv->engine.instmem.finish_access(dev);
+			return -EINVAL;
+		}
+	}
+	dev_priv->engine.instmem.finish_access(dev);
+
+	nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, save_nv001700);
+
+	/* Global PRAMIN heap */
+	if (nouveau_mem_init_heap(&dev_priv->ramin_heap,
+				  c_size, dev_priv->ramin_size - c_size)) {
+		dev_priv->ramin_heap = NULL;
+		NV_ERROR(dev, "Failed to init RAMIN heap\n");
+	}
+
+	/*XXX: incorrect, but needed to make hash func "work" */
+	dev_priv->ramht_offset = 0x10000;
+	dev_priv->ramht_bits   = 9;
+	dev_priv->ramht_size   = (1 << dev_priv->ramht_bits);
+	return 0;
+}
+
+void
+nv50_instmem_takedown(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
+	struct nouveau_channel *chan = dev_priv->fifos[0];
+	int i;
+
+	NV_DEBUG(dev, "\n");
+
+	if (!priv)
+		return;
+
+	/* Restore state from before init */
+	for (i = 0x1700; i <= 0x1710; i += 4)
+		nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);
+
+	nouveau_gpuobj_ref_del(dev, &priv->fb_bar);
+	nouveau_gpuobj_ref_del(dev, &priv->pramin_bar);
+	nouveau_gpuobj_ref_del(dev, &priv->pramin_pt);
+
+	/* Destroy dummy channel */
+	if (chan) {
+		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
+			nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);
+			dev_priv->vm_vram_pt[i] = NULL;
+		}
+		dev_priv->vm_vram_pt_nr = 0;
+
+		nouveau_gpuobj_del(dev, &chan->vm_pd);
+		nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+		nouveau_gpuobj_ref_del(dev, &chan->ramin);
+		nouveau_mem_takedown(&chan->ramin_heap);
+
+		dev_priv->fifos[0] = dev_priv->fifos[127] = NULL;
+		kfree(chan);
+	}
+
+	dev_priv->engine.instmem.priv = NULL;
+	kfree(priv);
+}
+
+int
+nv50_instmem_suspend(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->fifos[0];
+	struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
+	int i;
+
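+	/* Snapshot the fake channel's PRAMIN into system RAM so resume() can
+	 * write it back after VRAM contents are lost. */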
+	ramin->im_backing_suspend = vmalloc(ramin->im_pramin->size);
+	if (!ramin->im_backing_suspend)
+		return -ENOMEM;
+
+	for (i = 0; i < ramin->im_pramin->size; i += 4)
+		ramin->im_backing_suspend[i/4] = nv_ri32(dev, i);
+	return 0;
+}
+
+void
+nv50_instmem_resume(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
+	struct nouveau_channel *chan = dev_priv->fifos[0];
+	struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
+	int i;
+
+	nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (ramin->im_backing_start >> 16));
+	for (i = 0; i < ramin->im_pramin->size; i += 4)
+		BAR0_WI32(ramin, i, ramin->im_backing_suspend[i/4]);
+	vfree(ramin->im_backing_suspend);
+	ramin->im_backing_suspend = NULL;
+
+	/* Poke the relevant regs, and pray it works :) */
+	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
+	nv_wr32(dev, NV50_PUNK_UNK1710, 0);
+	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
+					 NV50_PUNK_BAR_CFG_BASE_VALID);
+	nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) |
+					NV50_PUNK_BAR1_CTXDMA_VALID);
+	nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
+					NV50_PUNK_BAR3_CTXDMA_VALID);
+
+	for (i = 0; i < 8; i++)
+		nv_wr32(dev, 0x1900 + (i*4), 0);
+}
+
+int
+nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
+		      uint32_t *sz)
+{
+	int ret;
+
+	if (gpuobj->im_backing)
+		return -EINVAL;
+
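+	/* Round the request up to the instmem page size, then back it with a
+	 * pinned VRAM buffer object. */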
+	*sz = (*sz + (NV50_INSTMEM_PAGE_SIZE-1)) & ~(NV50_INSTMEM_PAGE_SIZE-1);
+	if (*sz == 0)
+		return -EINVAL;
+
+	ret = nouveau_bo_new(dev, NULL, *sz, 0, TTM_PL_FLAG_VRAM, 0, 0x0000,
+			     true, false, &gpuobj->im_backing);
+	if (ret) {
+		NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
+		return ret;
+	}
+
+	ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
+	if (ret) {
+		NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
+		nouveau_bo_ref(NULL, &gpuobj->im_backing);
+		return ret;
+	}
+
+	gpuobj->im_backing_start = gpuobj->im_backing->bo.mem.mm_node->start;
+	gpuobj->im_backing_start <<= PAGE_SHIFT;
+
+	return 0;
+}
+
+void
+nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (gpuobj && gpuobj->im_backing) {
+		if (gpuobj->im_bound)
+			dev_priv->engine.instmem.unbind(dev, gpuobj);
+		nouveau_bo_unpin(gpuobj->im_backing);
+		nouveau_bo_ref(NULL, &gpuobj->im_backing);
+		gpuobj->im_backing = NULL;
+	}
+}
+
+int
+nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
+	uint32_t pte, pte_end, vram;
+
+	if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
+		return -EINVAL;
+
+	NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n",
+		 gpuobj->im_pramin->start, gpuobj->im_pramin->size);
+
+	pte     = (gpuobj->im_pramin->start >> 12) << 3;
+	pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+	vram    = gpuobj->im_backing_start;
+
+	NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n",
+		 gpuobj->im_pramin->start, pte, pte_end);
+	NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
+
+	dev_priv->engine.instmem.prepare_access(dev, true);
+	while (pte < pte_end) {
+		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1);
+		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
+
+		pte += 8;
+		vram += NV50_INSTMEM_PAGE_SIZE;
+	}
+	dev_priv->engine.instmem.finish_access(dev);
+
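+	/* These pokes presumably flush the VM after the PTE update; wait for
+	 * bit 0 to clear each time. */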
+	nv_wr32(dev, 0x100c80, 0x00040001);
+	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (1)\n");
+		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+		return -EBUSY;
+	}
+
+	nv_wr32(dev, 0x100c80, 0x00060001);
+	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+		return -EBUSY;
+	}
+
+	gpuobj->im_bound = 1;
+	return 0;
+}
+
+int
+nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
+	uint32_t pte, pte_end;
+
+	if (gpuobj->im_bound == 0)
+		return -EINVAL;
+
+	pte     = (gpuobj->im_pramin->start >> 12) << 3;
+	pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+
+	dev_priv->engine.instmem.prepare_access(dev, true);
+	while (pte < pte_end) {
+		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009);
+		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
+		pte += 8;
+	}
+	dev_priv->engine.instmem.finish_access(dev);
+
+	gpuobj->im_bound = 0;
+	return 0;
+}
+
+void
+nv50_instmem_prepare_access(struct drm_device *dev, bool write)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
+
+	priv->last_access_wr = write;
+}
+
+void
+nv50_instmem_finish_access(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
+
+	if (priv->last_access_wr) {
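+		/* PRAMIN writes are posted; kick the flush and wait for it
+		 * to drain. */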
+		nv_wr32(dev, 0x070000, 0x00000001);
+		if (!nv_wait(0x070000, 0x00000001, 0x00000000))
+			NV_ERROR(dev, "PRAMIN flush timeout\n");
+	}
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_mc.c b/drivers/gpu/drm/nouveau/nv50_mc.c
new file mode 100644
index 0000000..e0a9c3f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_mc.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+int
+nv50_mc_init(struct drm_device *dev)
+{
+	nv_wr32(dev, NV03_PMC_ENABLE, 0xffffffff);
+	return 0;
+}
+
+void
+nv50_mc_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
new file mode 100644
index 0000000..8c28046
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+
+#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
+#include "nouveau_reg.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_crtc.h"
+#include "nv50_display.h"
+
+static void
+nv50_sor_disconnect(struct nouveau_encoder *nv_encoder)
+{
+	struct drm_device *dev = to_drm_encoder(nv_encoder)->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *evo = dev_priv->evo;
+	int ret;
+
+	NV_DEBUG(dev, "Disconnecting SOR %d\n", nv_encoder->or);
+
+	ret = RING_SPACE(evo, 2);
+	if (ret) {
+		NV_ERROR(dev, "no space while disconnecting SOR\n");
+		return;
+	}
+	BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
+	OUT_RING(evo, 0);
+}
+
+static void
+nv50_sor_dp_link_train(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct bit_displayport_encoder_table *dpe;
+	int dpe_headerlen;
+
+	dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
+	if (!dpe) {
+		NV_ERROR(dev, "SOR-%d: no DP encoder table!\n", nv_encoder->or);
+		return;
+	}
+
+	if (dpe->script0) {
+		NV_DEBUG(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
+		nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0),
+					    nv_encoder->dcb);
+	}
+
+	if (!nouveau_dp_link_train(encoder))
+		NV_ERROR(dev, "SOR-%d: link training failed\n", nv_encoder->or);
+
+	if (dpe->script1) {
+		NV_DEBUG(dev, "SOR-%d: running DP script 1\n", nv_encoder->or);
+		nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1),
+					    nv_encoder->dcb);
+	}
+}
+
+static void
+nv50_sor_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	uint32_t val;
+	int or = nv_encoder->or;
+
+	NV_DEBUG(dev, "or %d mode %d\n", or, mode);
+
+	/* wait for any pending DPMS state change to complete */
+	if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or),
+		     NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
+		NV_ERROR(dev, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or);
+		NV_ERROR(dev, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or,
+			 nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or)));
+	}
+
+	val = nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or));
+
+	if (mode == DRM_MODE_DPMS_ON)
+		val |= NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
+	else
+		val &= ~NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
+
+	nv_wr32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val |
+		NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING);
+	if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(or),
+		     NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
+		NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or);
+		NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", or,
+			 nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or)));
+	}
+
+	if (nv_encoder->dcb->type == OUTPUT_DP && mode == DRM_MODE_DPMS_ON)
+		nv50_sor_dp_link_train(encoder);
+}
+
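+/* save/restore are unimplemented stubs; the "!!" error flags any
+ * unexpected call. */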
+static void
+nv50_sor_save(struct drm_encoder *encoder)
+{
+	NV_ERROR(encoder->dev, "!!\n");
+}
+
+static void
+nv50_sor_restore(struct drm_encoder *encoder)
+{
+	NV_ERROR(encoder->dev, "!!\n");
+}
+
+static bool
+nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+		    struct drm_display_mode *adjusted_mode)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_connector *connector;
+
+	NV_DEBUG(encoder->dev, "or %d\n", nv_encoder->or);
+
+	connector = nouveau_encoder_connector_get(nv_encoder);
+	if (!connector) {
+		NV_ERROR(encoder->dev, "Encoder has no connector\n");
+		return false;
+	}
+
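+	/* With any scaling mode other than NONE, drive the panel at its
+	 * native timings and scale the requested mode into them. */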
+	if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
+	     connector->native_mode) {
+		int id = adjusted_mode->base.id;
+		*adjusted_mode = *connector->native_mode;
+		adjusted_mode->base.id = id;
+	}
+
+	return true;
+}
+
+static void
+nv50_sor_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_sor_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+		  struct drm_display_mode *adjusted_mode)
+{
+	struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
+	struct nouveau_channel *evo = dev_priv->evo;
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
+	uint32_t mode_ctl = 0;
+	int ret;
+
+	NV_DEBUG(dev, "or %d\n", nv_encoder->or);
+
+	nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
+
+	switch (nv_encoder->dcb->type) {
+	case OUTPUT_TMDS:
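+		/* Single-link TMDS maxes out at a 165 MHz pixel clock;
+		 * faster modes select the dual-link encoding. */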
+		if (nv_encoder->dcb->sorconf.link & 1) {
+			if (adjusted_mode->clock < 165000)
+				mode_ctl = 0x0100;
+			else
+				mode_ctl = 0x0500;
+		} else
+			mode_ctl = 0x0200;
+		break;
+	case OUTPUT_DP:
+		mode_ctl |= 0x00050000;
+		if (nv_encoder->dcb->sorconf.link & 1)
+			mode_ctl |= 0x00000800;
+		else
+			mode_ctl |= 0x00000900;
+		break;
+	default:
+		break;
+	}
+
+	if (crtc->index == 1)
+		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC1;
+	else
+		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;
+
+	ret = RING_SPACE(evo, 2);
+	if (ret) {
+		NV_ERROR(dev, "no space while connecting SOR\n");
+		return;
+	}
+	BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
+	OUT_RING(evo, mode_ctl);
+}
+
+static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = {
+	.dpms = nv50_sor_dpms,
+	.save = nv50_sor_save,
+	.restore = nv50_sor_restore,
+	.mode_fixup = nv50_sor_mode_fixup,
+	.prepare = nv50_sor_prepare,
+	.commit = nv50_sor_commit,
+	.mode_set = nv50_sor_mode_set,
+	.detect = NULL
+};
+
+static void
+nv50_sor_destroy(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+	if (!encoder)
+		return;
+
+	NV_DEBUG(encoder->dev, "\n");
+
+	drm_encoder_cleanup(encoder);
+
+	kfree(nv_encoder);
+}
+
+static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
+	.destroy = nv50_sor_destroy,
+};
+
+int
+nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
+{
+	struct nouveau_encoder *nv_encoder = NULL;
+	struct drm_encoder *encoder;
+	bool dum;
+	int type;
+
+	NV_DEBUG(dev, "\n");
+
+	switch (entry->type) {
+	case OUTPUT_TMDS:
+		NV_INFO(dev, "Detected a TMDS output\n");
+		type = DRM_MODE_ENCODER_TMDS;
+		break;
+	case OUTPUT_LVDS:
+		NV_INFO(dev, "Detected an LVDS output\n");
+		type = DRM_MODE_ENCODER_LVDS;
+
+		if (nouveau_bios_parse_lvds_table(dev, 0, &dum, &dum)) {
+			NV_ERROR(dev, "Failed parsing LVDS table\n");
+			return -EINVAL;
+		}
+		break;
+	case OUTPUT_DP:
+		NV_INFO(dev, "Detected a DP output\n");
+		type = DRM_MODE_ENCODER_TMDS;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+	if (!nv_encoder)
+		return -ENOMEM;
+	encoder = to_drm_encoder(nv_encoder);
+
+	nv_encoder->dcb = entry;
+	nv_encoder->or = ffs(entry->or) - 1;
+
+	nv_encoder->disconnect = nv50_sor_disconnect;
+
+	drm_encoder_init(dev, encoder, &nv50_sor_encoder_funcs, type);
+	drm_encoder_helper_add(encoder, &nv50_sor_helper_funcs);
+
+	encoder->possible_crtcs = entry->heads;
+	encoder->possible_clones = 0;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h
new file mode 100644
index 0000000..5998c35
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvreg.h
@@ -0,0 +1,535 @@
+/* $XConsortium: nvreg.h /main/2 1996/10/28 05:13:41 kaleb $ */
+/*
+ * Copyright 1996-1997  David J. McKay
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * DAVID J. MCKAY BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/nv/nvreg.h,v 1.6 2002/01/25 21:56:06 tsi Exp $ */
+
+#ifndef __NVREG_H_
+#define __NVREG_H_
+
+#define NV_PMC_OFFSET               0x00000000
+#define NV_PMC_SIZE                 0x00001000
+
+#define NV_PBUS_OFFSET              0x00001000
+#define NV_PBUS_SIZE                0x00001000
+
+#define NV_PFIFO_OFFSET             0x00002000
+#define NV_PFIFO_SIZE               0x00002000
+
+#define NV_HDIAG_OFFSET             0x00005000
+#define NV_HDIAG_SIZE               0x00001000
+
+#define NV_PRAM_OFFSET              0x00006000
+#define NV_PRAM_SIZE                0x00001000
+
+#define NV_PVIDEO_OFFSET            0x00008000
+#define NV_PVIDEO_SIZE              0x00001000
+
+#define NV_PTIMER_OFFSET            0x00009000
+#define NV_PTIMER_SIZE              0x00001000
+
+#define NV_PPM_OFFSET               0x0000A000
+#define NV_PPM_SIZE                 0x00001000
+
+#define NV_PTV_OFFSET               0x0000D000
+#define NV_PTV_SIZE                 0x00001000
+
+#define NV_PRMVGA_OFFSET            0x000A0000
+#define NV_PRMVGA_SIZE              0x00020000
+
+#define NV_PRMVIO0_OFFSET           0x000C0000
+#define NV_PRMVIO_SIZE              0x00002000
+#define NV_PRMVIO1_OFFSET           0x000C2000
+
+#define NV_PFB_OFFSET               0x00100000
+#define NV_PFB_SIZE                 0x00001000
+
+#define NV_PEXTDEV_OFFSET           0x00101000
+#define NV_PEXTDEV_SIZE             0x00001000
+
+#define NV_PME_OFFSET               0x00200000
+#define NV_PME_SIZE                 0x00001000
+
+#define NV_PROM_OFFSET              0x00300000
+#define NV_PROM_SIZE                0x00010000
+
+#define NV_PGRAPH_OFFSET            0x00400000
+#define NV_PGRAPH_SIZE              0x00010000
+
+#define NV_PCRTC0_OFFSET            0x00600000
+#define NV_PCRTC0_SIZE              0x00002000 /* empirical */
+
+#define NV_PRMCIO0_OFFSET           0x00601000
+#define NV_PRMCIO_SIZE              0x00002000
+#define NV_PRMCIO1_OFFSET           0x00603000
+
+#define NV50_DISPLAY_OFFSET           0x00610000
+#define NV50_DISPLAY_SIZE             0x0000FFFF
+
+#define NV_PRAMDAC0_OFFSET          0x00680000
+#define NV_PRAMDAC0_SIZE            0x00002000
+
+#define NV_PRMDIO0_OFFSET           0x00681000
+#define NV_PRMDIO_SIZE              0x00002000
+#define NV_PRMDIO1_OFFSET           0x00683000
+
+#define NV_PRAMIN_OFFSET            0x00700000
+#define NV_PRAMIN_SIZE              0x00100000
+
+#define NV_FIFO_OFFSET              0x00800000
+#define NV_FIFO_SIZE                0x00800000
+
+#define NV_PMC_BOOT_0			0x00000000
+#define NV_PMC_ENABLE			0x00000200
+
+#define NV_VIO_VSE2			0x000003c3
+#define NV_VIO_SRX			0x000003c4
+
+#define NV_CIO_CRX__COLOR		0x000003d4
+#define NV_CIO_CR__COLOR		0x000003d5
+
+#define NV_PBUS_DEBUG_1			0x00001084
+#define NV_PBUS_DEBUG_4			0x00001098
+#define NV_PBUS_DEBUG_DUALHEAD_CTL	0x000010f0
+#define NV_PBUS_POWERCTRL_1		0x00001584
+#define NV_PBUS_POWERCTRL_2		0x00001588
+#define NV_PBUS_POWERCTRL_4		0x00001590
+#define NV_PBUS_PCI_NV_19		0x0000184C
+#define NV_PBUS_PCI_NV_20		0x00001850
+#	define NV_PBUS_PCI_NV_20_ROM_SHADOW_DISABLED	(0 << 0)
+#	define NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED	(1 << 0)
+
+#define NV_PFIFO_RAMHT			0x00002210
+
+#define NV_PTV_TV_INDEX			0x0000d220
+#define NV_PTV_TV_DATA			0x0000d224
+#define NV_PTV_HFILTER			0x0000d310
+#define NV_PTV_HFILTER2			0x0000d390
+#define NV_PTV_VFILTER			0x0000d510
+
+#define NV_PRMVIO_MISC__WRITE		0x000c03c2
+#define NV_PRMVIO_SRX			0x000c03c4
+#define NV_PRMVIO_SR			0x000c03c5
+#	define NV_VIO_SR_RESET_INDEX		0x00
+#	define NV_VIO_SR_CLOCK_INDEX		0x01
+#	define NV_VIO_SR_PLANE_MASK_INDEX	0x02
+#	define NV_VIO_SR_CHAR_MAP_INDEX		0x03
+#	define NV_VIO_SR_MEM_MODE_INDEX		0x04
+#define NV_PRMVIO_MISC__READ		0x000c03cc
+#define NV_PRMVIO_GRX			0x000c03ce
+#define NV_PRMVIO_GX			0x000c03cf
+#	define NV_VIO_GX_SR_INDEX		0x00
+#	define NV_VIO_GX_SREN_INDEX		0x01
+#	define NV_VIO_GX_CCOMP_INDEX		0x02
+#	define NV_VIO_GX_ROP_INDEX		0x03
+#	define NV_VIO_GX_READ_MAP_INDEX		0x04
+#	define NV_VIO_GX_MODE_INDEX		0x05
+#	define NV_VIO_GX_MISC_INDEX		0x06
+#	define NV_VIO_GX_DONT_CARE_INDEX	0x07
+#	define NV_VIO_GX_BIT_MASK_INDEX		0x08
+
+#define NV_PFB_BOOT_0			0x00100000
+#define NV_PFB_CFG0			0x00100200
+#define NV_PFB_CFG1			0x00100204
+#define NV_PFB_CSTATUS			0x0010020C
+#define NV_PFB_REFCTRL			0x00100210
+#	define NV_PFB_REFCTRL_VALID_1			(1 << 31)
+#define NV_PFB_PAD			0x0010021C
+#	define NV_PFB_PAD_CKE_NORMAL			(1 << 0)
+#define NV_PFB_TILE_NV10		0x00100240
+#define NV_PFB_TILE_SIZE_NV10		0x00100244
+#define NV_PFB_REF			0x001002D0
+#	define NV_PFB_REF_CMD_REFRESH			(1 << 0)
+#define NV_PFB_PRE			0x001002D4
+#	define NV_PFB_PRE_CMD_PRECHARGE			(1 << 0)
+#define NV_PFB_CLOSE_PAGE2		0x0010033C
+#define NV_PFB_TILE_NV40		0x00100600
+#define NV_PFB_TILE_SIZE_NV40		0x00100604
+
+#define NV_PEXTDEV_BOOT_0		0x00101000
+#	define NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT	(8 << 12)
+#define NV_PEXTDEV_BOOT_3		0x0010100c
+
+#define NV_PCRTC_INTR_0					0x00600100
+#	define NV_PCRTC_INTR_0_VBLANK				(1 << 0)
+#define NV_PCRTC_INTR_EN_0				0x00600140
+#define NV_PCRTC_START					0x00600800
+#define NV_PCRTC_CONFIG					0x00600804
+#	define NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA		(1 << 0)
+#	define NV_PCRTC_CONFIG_START_ADDRESS_HSYNC		(2 << 0)
+#define NV_PCRTC_CURSOR_CONFIG				0x00600810
+#	define NV_PCRTC_CURSOR_CONFIG_ENABLE_ENABLE		(1 << 0)
+#	define NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE	(1 << 4)
+#	define NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM	(1 << 8)
+#	define NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32		(1 << 12)
+#	define NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64		(1 << 16)
+#	define NV_PCRTC_CURSOR_CONFIG_CUR_LINES_32		(2 << 24)
+#	define NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64		(4 << 24)
+#	define NV_PCRTC_CURSOR_CONFIG_CUR_BLEND_ALPHA		(1 << 28)
+
+/* note: PCRTC_GPIO is not available on nv10, and in fact aliases 0x600810 */
+#define NV_PCRTC_GPIO					0x00600818
+#define NV_PCRTC_GPIO_EXT				0x0060081c
+#define NV_PCRTC_830					0x00600830
+#define NV_PCRTC_834					0x00600834
+#define NV_PCRTC_850					0x00600850
+#define NV_PCRTC_ENGINE_CTRL				0x00600860
+#	define NV_CRTC_FSEL_I2C					(1 << 4)
+#	define NV_CRTC_FSEL_OVERLAY				(1 << 12)
+
+#define NV_PRMCIO_ARX			0x006013c0
+#define NV_PRMCIO_AR__WRITE		0x006013c0
+#define NV_PRMCIO_AR__READ		0x006013c1
+#	define NV_CIO_AR_MODE_INDEX		0x10
+#	define NV_CIO_AR_OSCAN_INDEX		0x11
+#	define NV_CIO_AR_PLANE_INDEX		0x12
+#	define NV_CIO_AR_HPP_INDEX		0x13
+#	define NV_CIO_AR_CSEL_INDEX		0x14
+#define NV_PRMCIO_INP0			0x006013c2
+#define NV_PRMCIO_CRX__COLOR		0x006013d4
+#define NV_PRMCIO_CR__COLOR		0x006013d5
+	/* Standard VGA CRTC registers */
+#	define NV_CIO_CR_HDT_INDEX		0x00	/* horizontal display total */
+#	define NV_CIO_CR_HDE_INDEX		0x01	/* horizontal display end */
+#	define NV_CIO_CR_HBS_INDEX		0x02	/* horizontal blanking start */
+#	define NV_CIO_CR_HBE_INDEX		0x03	/* horizontal blanking end */
+#		define NV_CIO_CR_HBE_4_0		4:0
+#	define NV_CIO_CR_HRS_INDEX		0x04	/* horizontal retrace start */
+#	define NV_CIO_CR_HRE_INDEX		0x05	/* horizontal retrace end */
+#		define NV_CIO_CR_HRE_4_0		4:0
+#		define NV_CIO_CR_HRE_HBE_5		7:7
+#	define NV_CIO_CR_VDT_INDEX		0x06	/* vertical display total */
+#	define NV_CIO_CR_OVL_INDEX		0x07	/* overflow bits */
+#		define NV_CIO_CR_OVL_VDT_8		0:0
+#		define NV_CIO_CR_OVL_VDE_8		1:1
+#		define NV_CIO_CR_OVL_VRS_8		2:2
+#		define NV_CIO_CR_OVL_VBS_8		3:3
+#		define NV_CIO_CR_OVL_VDT_9		5:5
+#		define NV_CIO_CR_OVL_VDE_9		6:6
+#		define NV_CIO_CR_OVL_VRS_9		7:7
+#	define NV_CIO_CR_RSAL_INDEX		0x08	/* normally "preset row scan" */
+#	define NV_CIO_CR_CELL_HT_INDEX		0x09	/* cell height?! normally "max scan line" */
+#		define NV_CIO_CR_CELL_HT_VBS_9		5:5
+#		define NV_CIO_CR_CELL_HT_SCANDBL	7:7
+#	define NV_CIO_CR_CURS_ST_INDEX		0x0a	/* cursor start */
+#	define NV_CIO_CR_CURS_END_INDEX		0x0b	/* cursor end */
+#	define NV_CIO_CR_SA_HI_INDEX		0x0c	/* screen start address high */
+#	define NV_CIO_CR_SA_LO_INDEX		0x0d	/* screen start address low */
+#	define NV_CIO_CR_TCOFF_HI_INDEX		0x0e	/* cursor offset high */
+#	define NV_CIO_CR_TCOFF_LO_INDEX		0x0f	/* cursor offset low */
+#	define NV_CIO_CR_VRS_INDEX		0x10	/* vertical retrace start */
+#	define NV_CIO_CR_VRE_INDEX		0x11	/* vertical retrace end */
+#		define NV_CIO_CR_VRE_3_0		3:0
+#	define NV_CIO_CR_VDE_INDEX		0x12	/* vertical display end */
+#	define NV_CIO_CR_OFFSET_INDEX		0x13	/* sets screen pitch */
+#	define NV_CIO_CR_ULINE_INDEX		0x14	/* underline location */
+#	define NV_CIO_CR_VBS_INDEX		0x15	/* vertical blank start */
+#	define NV_CIO_CR_VBE_INDEX		0x16	/* vertical blank end */
+#	define NV_CIO_CR_MODE_INDEX		0x17	/* crtc mode control */
+#	define NV_CIO_CR_LCOMP_INDEX		0x18	/* line compare */
+	/* Extended VGA CRTC registers */
+#	define NV_CIO_CRE_RPC0_INDEX		0x19	/* repaint control 0 */
+#		define NV_CIO_CRE_RPC0_OFFSET_10_8	7:5
+#	define NV_CIO_CRE_RPC1_INDEX		0x1a	/* repaint control 1 */
+#		define NV_CIO_CRE_RPC1_LARGE		2:2
+#	define NV_CIO_CRE_FF_INDEX		0x1b	/* fifo control */
+#	define NV_CIO_CRE_ENH_INDEX		0x1c	/* enhanced? */
+#	define NV_CIO_SR_LOCK_INDEX		0x1f	/* crtc lock */
+#		define NV_CIO_SR_UNLOCK_RW_VALUE	0x57
+#		define NV_CIO_SR_LOCK_VALUE		0x99
+#	define NV_CIO_CRE_FFLWM__INDEX		0x20	/* fifo low water mark */
+#	define NV_CIO_CRE_21			0x21	/* vga shadow crtc lock */
+#	define NV_CIO_CRE_LSR_INDEX		0x25	/* ? */
+#		define NV_CIO_CRE_LSR_VDT_10		0:0
+#		define NV_CIO_CRE_LSR_VDE_10		1:1
+#		define NV_CIO_CRE_LSR_VRS_10		2:2
+#		define NV_CIO_CRE_LSR_VBS_10		3:3
+#		define NV_CIO_CRE_LSR_HBE_6		4:4
+#	define NV_CIO_CR_ARX_INDEX		0x26	/* attribute index -- ro copy of 0x60.3c0 */
+#	define NV_CIO_CRE_CHIP_ID_INDEX		0x27	/* chip revision */
+#	define NV_CIO_CRE_PIXEL_INDEX		0x28
+#		define NV_CIO_CRE_PIXEL_FORMAT		1:0
+#	define NV_CIO_CRE_HEB__INDEX		0x2d	/* horizontal extra bits? */
+#		define NV_CIO_CRE_HEB_HDT_8		0:0
+#		define NV_CIO_CRE_HEB_HDE_8		1:1
+#		define NV_CIO_CRE_HEB_HBS_8		2:2
+#		define NV_CIO_CRE_HEB_HRS_8		3:3
+#		define NV_CIO_CRE_HEB_ILC_8		4:4
+#	define NV_CIO_CRE_2E			0x2e	/* some scratch or dummy reg to force writes to sink in */
+#	define NV_CIO_CRE_HCUR_ADDR2_INDEX	0x2f	/* cursor */
+#	define NV_CIO_CRE_HCUR_ADDR0_INDEX	0x30		/* pixmap */
+#		define NV_CIO_CRE_HCUR_ADDR0_ADR	6:0
+#		define NV_CIO_CRE_HCUR_ASI		7:7
+#	define NV_CIO_CRE_HCUR_ADDR1_INDEX	0x31			/* address */
+#		define NV_CIO_CRE_HCUR_ADDR1_ENABLE	0:0
+#		define NV_CIO_CRE_HCUR_ADDR1_CUR_DBL	1:1
+#		define NV_CIO_CRE_HCUR_ADDR1_ADR	7:2
+#	define NV_CIO_CRE_LCD__INDEX		0x33
+#		define NV_CIO_CRE_LCD_LCD_SELECT	0:0
+#	define NV_CIO_CRE_DDC0_STATUS__INDEX	0x36
+#	define NV_CIO_CRE_DDC0_WR__INDEX	0x37
+#	define NV_CIO_CRE_ILACE__INDEX		0x39	/* interlace */
+#	define NV_CIO_CRE_SCRATCH3__INDEX	0x3b
+#	define NV_CIO_CRE_SCRATCH4__INDEX	0x3c
+#	define NV_CIO_CRE_DDC_STATUS__INDEX	0x3e
+#	define NV_CIO_CRE_DDC_WR__INDEX		0x3f
+#	define NV_CIO_CRE_EBR_INDEX		0x41	/* extra bits ? (vertical) */
+#		define NV_CIO_CRE_EBR_VDT_11		0:0
+#		define NV_CIO_CRE_EBR_VDE_11		2:2
+#		define NV_CIO_CRE_EBR_VRS_11		4:4
+#		define NV_CIO_CRE_EBR_VBS_11		6:6
+#	define NV_CIO_CRE_43			0x43
+#	define NV_CIO_CRE_44			0x44	/* head control */
+#	define NV_CIO_CRE_CSB			0x45	/* colour saturation boost */
+#	define NV_CIO_CRE_RCR			0x46
+#		define NV_CIO_CRE_RCR_ENDIAN_BIG	7:7
+#	define NV_CIO_CRE_47			0x47	/* extended fifo lwm, used on nv30+ */
+#	define NV_CIO_CRE_49			0x49
+#	define NV_CIO_CRE_4B			0x4b	/* given patterns in 0x[2-3][a-c] regs, probably scratch 6 */
+#	define NV_CIO_CRE_TVOUT_LATENCY		0x52
+#	define NV_CIO_CRE_53			0x53	/* `fp_htiming' according to Haiku */
+#	define NV_CIO_CRE_54			0x54	/* `fp_vtiming' according to Haiku */
+#	define NV_CIO_CRE_57			0x57	/* index reg for cr58 */
+#	define NV_CIO_CRE_58			0x58	/* data reg for cr57 */
+#	define NV_CIO_CRE_59			0x59	/* related to on/off-chip-ness of digital outputs */
+#	define NV_CIO_CRE_5B			0x5B	/* newer colour saturation reg */
+#	define NV_CIO_CRE_85			0x85
+#	define NV_CIO_CRE_86			0x86
+#define NV_PRMCIO_INP0__COLOR		0x006013da
+
+#define NV_PRAMDAC_CU_START_POS				0x00680300
+#	define NV_PRAMDAC_CU_START_POS_X			15:0
+#	define NV_PRAMDAC_CU_START_POS_Y			31:16
+#define NV_RAMDAC_NV10_CURSYNC				0x00680404
+
+#define NV_PRAMDAC_NVPLL_COEFF				0x00680500
+#define NV_PRAMDAC_MPLL_COEFF				0x00680504
+#define NV_PRAMDAC_VPLL_COEFF				0x00680508
+#	define NV30_RAMDAC_ENABLE_VCO2				(8 << 4)
+
+#define NV_PRAMDAC_PLL_COEFF_SELECT			0x0068050c
+#	define NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE	(4 << 0)
+#	define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL	(1 << 8)
+#	define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_VPLL	(2 << 8)
+#	define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL	(4 << 8)
+#	define NV_PRAMDAC_PLL_COEFF_SELECT_PLL_SOURCE_VPLL2	(8 << 8)
+#	define NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1		(1 << 16)
+#	define NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1		(2 << 16)
+#	define NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2		(4 << 16)
+#	define NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2		(8 << 16)
+#	define NV_PRAMDAC_PLL_COEFF_SELECT_TV_CLK_SOURCE_VIP	(1 << 20)
+#	define NV_PRAMDAC_PLL_COEFF_SELECT_VCLK_RATIO_DB2	(1 << 28)
+#	define NV_PRAMDAC_PLL_COEFF_SELECT_VCLK2_RATIO_DB2	(2 << 28)
+
+#define NV_PRAMDAC_PLL_SETUP_CONTROL			0x00680510
+#define NV_RAMDAC_VPLL2					0x00680520
+#define NV_PRAMDAC_SEL_CLK				0x00680524
+#define NV_RAMDAC_DITHER_NV11				0x00680528
+#define NV_PRAMDAC_DACCLK				0x0068052c
+#	define NV_PRAMDAC_DACCLK_SEL_DACCLK			(1 << 0)
+
+#define NV_RAMDAC_NVPLL_B				0x00680570
+#define NV_RAMDAC_MPLL_B				0x00680574
+#define NV_RAMDAC_VPLL_B				0x00680578
+#define NV_RAMDAC_VPLL2_B				0x0068057c
+#	define NV31_RAMDAC_ENABLE_VCO2				(8 << 28)
+#define NV_PRAMDAC_580					0x00680580
+#	define NV_RAMDAC_580_VPLL1_ACTIVE			(1 << 8)
+#	define NV_RAMDAC_580_VPLL2_ACTIVE			(1 << 28)
+
+#define NV_PRAMDAC_GENERAL_CONTROL			0x00680600
+#	define NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON		(3 << 4)
+#	define NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL		(1 << 8)
+#	define NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL		(1 << 12)
+#	define NV_PRAMDAC_GENERAL_CONTROL_TERMINATION_75OHM	(2 << 16)
+#	define NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS		(1 << 20)
+#	define NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG		(2 << 28)
+#define NV_PRAMDAC_TEST_CONTROL				0x00680608
+#	define NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED	(1 << 12)
+#	define NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF		(1 << 16)
+#	define NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI		(1 << 28)
+#define NV_PRAMDAC_TESTPOINT_DATA			0x00680610
+#	define NV_PRAMDAC_TESTPOINT_DATA_NOTBLANK		(8 << 28)
+#define NV_PRAMDAC_630					0x00680630
+#define NV_PRAMDAC_634					0x00680634
+
+#define NV_PRAMDAC_TV_SETUP				0x00680700
+#define NV_PRAMDAC_TV_VTOTAL				0x00680720
+#define NV_PRAMDAC_TV_VSKEW				0x00680724
+#define NV_PRAMDAC_TV_VSYNC_DELAY			0x00680728
+#define NV_PRAMDAC_TV_HTOTAL				0x0068072c
+#define NV_PRAMDAC_TV_HSKEW				0x00680730
+#define NV_PRAMDAC_TV_HSYNC_DELAY			0x00680734
+#define NV_PRAMDAC_TV_HSYNC_DELAY2			0x00680738
+
+#define NV_PRAMDAC_FP_VDISPLAY_END			0x00680800
+#define NV_PRAMDAC_FP_VTOTAL				0x00680804
+#define NV_PRAMDAC_FP_VCRTC				0x00680808
+#define NV_PRAMDAC_FP_VSYNC_START			0x0068080c
+#define NV_PRAMDAC_FP_VSYNC_END				0x00680810
+#define NV_PRAMDAC_FP_VVALID_START			0x00680814
+#define NV_PRAMDAC_FP_VVALID_END			0x00680818
+#define NV_PRAMDAC_FP_HDISPLAY_END			0x00680820
+#define NV_PRAMDAC_FP_HTOTAL				0x00680824
+#define NV_PRAMDAC_FP_HCRTC				0x00680828
+#define NV_PRAMDAC_FP_HSYNC_START			0x0068082c
+#define NV_PRAMDAC_FP_HSYNC_END				0x00680830
+#define NV_PRAMDAC_FP_HVALID_START			0x00680834
+#define NV_PRAMDAC_FP_HVALID_END			0x00680838
+
+#define NV_RAMDAC_FP_DITHER				0x0068083c
+#define NV_PRAMDAC_FP_TG_CONTROL			0x00680848
+#	define NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS		(1 << 0)
+#	define NV_PRAMDAC_FP_TG_CONTROL_VSYNC_DISABLE		(2 << 0)
+#	define NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS		(1 << 4)
+#	define NV_PRAMDAC_FP_TG_CONTROL_HSYNC_DISABLE		(2 << 4)
+#	define NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE		(0 << 8)
+#	define NV_PRAMDAC_FP_TG_CONTROL_MODE_CENTER		(1 << 8)
+#	define NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE		(2 << 8)
+#	define NV_PRAMDAC_FP_TG_CONTROL_READ_PROG		(1 << 20)
+#	define NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12		(1 << 24)
+#	define NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS		(1 << 28)
+#	define NV_PRAMDAC_FP_TG_CONTROL_DISPEN_DISABLE		(2 << 28)
+#define NV_PRAMDAC_FP_MARGIN_COLOR			0x0068084c
+#define NV_PRAMDAC_850					0x00680850
+#define NV_PRAMDAC_85C					0x0068085c
+#define NV_PRAMDAC_FP_DEBUG_0				0x00680880
+#	define NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE		(1 << 0)
+#	define NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE		(1 << 4)
+/* This doesn't seem to be essential for tmds, but still often set */
+#	define NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED		(8 << 4)
+#	define NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR		(1 << 8)
+#	define NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR		(1 << 12)
+#	define NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND		(1 << 20)
+#	define NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND		(1 << 24)
+#       define NV_PRAMDAC_FP_DEBUG_0_PWRDOWN_FPCLK              (1 << 28)
+#define NV_PRAMDAC_FP_DEBUG_1				0x00680884
+#	define NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE		11:0
+#	define NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE	(1 << 12)
+#	define NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE		27:16
+#	define NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE	(1 << 28)
+#define NV_PRAMDAC_FP_DEBUG_2				0x00680888
+#define NV_PRAMDAC_FP_DEBUG_3				0x0068088C
+
+/* see NV_PRAMDAC_INDIR_TMDS in rules.xml */
+#define NV_PRAMDAC_FP_TMDS_CONTROL			0x006808b0
+#	define NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE		(1 << 16)
+#define NV_PRAMDAC_FP_TMDS_DATA				0x006808b4
+
+#define NV_PRAMDAC_8C0                                  0x006808c0
+
+/* Some kind of switch */
+#define NV_PRAMDAC_900					0x00680900
+#define NV_PRAMDAC_A20					0x00680A20
+#define NV_PRAMDAC_A24					0x00680A24
+#define NV_PRAMDAC_A34					0x00680A34
+
+#define NV_PRAMDAC_CTV					0x00680c00
+
+/* names fabricated from NV_USER_DAC info */
+#define NV_PRMDIO_PIXEL_MASK		0x006813c6
+#	define NV_PRMDIO_PIXEL_MASK_MASK	0xff
+#define NV_PRMDIO_READ_MODE_ADDRESS	0x006813c7
+#define NV_PRMDIO_WRITE_MODE_ADDRESS	0x006813c8
+#define NV_PRMDIO_PALETTE_DATA		0x006813c9
+
+#define NV_PGRAPH_DEBUG_0		0x00400080
+#define NV_PGRAPH_DEBUG_1		0x00400084
+#define NV_PGRAPH_DEBUG_2_NV04		0x00400088
+#define NV_PGRAPH_DEBUG_2		0x00400620
+#define NV_PGRAPH_DEBUG_3		0x0040008c
+#define NV_PGRAPH_DEBUG_4		0x00400090
+#define NV_PGRAPH_INTR			0x00400100
+#define NV_PGRAPH_INTR_EN		0x00400140
+#define NV_PGRAPH_CTX_CONTROL		0x00400144
+#define NV_PGRAPH_CTX_CONTROL_NV04	0x00400170
+#define NV_PGRAPH_ABS_UCLIP_XMIN	0x0040053C
+#define NV_PGRAPH_ABS_UCLIP_YMIN	0x00400540
+#define NV_PGRAPH_ABS_UCLIP_XMAX	0x00400544
+#define NV_PGRAPH_ABS_UCLIP_YMAX	0x00400548
+#define NV_PGRAPH_BETA_AND		0x00400608
+#define NV_PGRAPH_LIMIT_VIOL_PIX	0x00400610
+#define NV_PGRAPH_BOFFSET0		0x00400640
+#define NV_PGRAPH_BOFFSET1		0x00400644
+#define NV_PGRAPH_BOFFSET2		0x00400648
+#define NV_PGRAPH_BLIMIT0		0x00400684
+#define NV_PGRAPH_BLIMIT1		0x00400688
+#define NV_PGRAPH_BLIMIT2		0x0040068c
+#define NV_PGRAPH_STATUS		0x00400700
+#define NV_PGRAPH_SURFACE		0x00400710
+#define NV_PGRAPH_STATE			0x00400714
+#define NV_PGRAPH_FIFO			0x00400720
+#define NV_PGRAPH_PATTERN_SHAPE		0x00400810
+#define NV_PGRAPH_TILE			0x00400b00
+
+#define NV_PVIDEO_INTR_EN		0x00008140
+#define NV_PVIDEO_BUFFER		0x00008700
+#define NV_PVIDEO_STOP			0x00008704
+#define NV_PVIDEO_UVPLANE_BASE(buff)	(0x00008800+(buff)*4)
+#define NV_PVIDEO_UVPLANE_LIMIT(buff)	(0x00008808+(buff)*4)
+#define NV_PVIDEO_UVPLANE_OFFSET_BUFF(buff)	(0x00008820+(buff)*4)
+#define NV_PVIDEO_BASE(buff)		(0x00008900+(buff)*4)
+#define NV_PVIDEO_LIMIT(buff)		(0x00008908+(buff)*4)
+#define NV_PVIDEO_LUMINANCE(buff)	(0x00008910+(buff)*4)
+#define NV_PVIDEO_CHROMINANCE(buff)	(0x00008918+(buff)*4)
+#define NV_PVIDEO_OFFSET_BUFF(buff)	(0x00008920+(buff)*4)
+#define NV_PVIDEO_SIZE_IN(buff)		(0x00008928+(buff)*4)
+#define NV_PVIDEO_POINT_IN(buff)	(0x00008930+(buff)*4)
+#define NV_PVIDEO_DS_DX(buff)		(0x00008938+(buff)*4)
+#define NV_PVIDEO_DT_DY(buff)		(0x00008940+(buff)*4)
+#define NV_PVIDEO_POINT_OUT(buff)	(0x00008948+(buff)*4)
+#define NV_PVIDEO_SIZE_OUT(buff)	(0x00008950+(buff)*4)
+#define NV_PVIDEO_FORMAT(buff)		(0x00008958+(buff)*4)
+#	define NV_PVIDEO_FORMAT_PLANAR			(1 << 0)
+#	define NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8	(1 << 16)
+#	define NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY	(1 << 20)
+#	define NV_PVIDEO_FORMAT_MATRIX_ITURBT709	(1 << 24)
+#define NV_PVIDEO_COLOR_KEY		0x00008B00
+
+/* NV04 overlay defines from VIDIX & Haiku */
+#define NV_PVIDEO_INTR_EN_0		0x00680140
+#define NV_PVIDEO_STEP_SIZE		0x00680200
+#define NV_PVIDEO_CONTROL_Y		0x00680204
+#define NV_PVIDEO_CONTROL_X		0x00680208
+#define NV_PVIDEO_BUFF0_START_ADDRESS	0x0068020c
+#define NV_PVIDEO_BUFF0_PITCH_LENGTH	0x00680214
+#define NV_PVIDEO_BUFF0_OFFSET		0x0068021c
+#define NV_PVIDEO_BUFF1_START_ADDRESS	0x00680210
+#define NV_PVIDEO_BUFF1_PITCH_LENGTH	0x00680218
+#define NV_PVIDEO_BUFF1_OFFSET		0x00680220
+#define NV_PVIDEO_OE_STATE		0x00680224
+#define NV_PVIDEO_SU_STATE		0x00680228
+#define NV_PVIDEO_RM_STATE		0x0068022c
+#define NV_PVIDEO_WINDOW_START		0x00680230
+#define NV_PVIDEO_WINDOW_SIZE		0x00680234
+#define NV_PVIDEO_FIFO_THRES_SIZE	0x00680238
+#define NV_PVIDEO_FIFO_BURST_LENGTH	0x0068023c
+#define NV_PVIDEO_KEY			0x00680240
+#define NV_PVIDEO_OVERLAY		0x00680244
+#define NV_PVIDEO_RED_CSC_OFFSET	0x00680280
+#define NV_PVIDEO_GREEN_CSC_OFFSET	0x00680284
+#define NV_PVIDEO_BLUE_CSC_OFFSET	0x00680288
+#define NV_PVIDEO_CSC_ADJUST		0x0068028c
+
+#endif
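
The FP_DEBUG_1 *_VALUE entries above use a high:low bit-range notation rather than a C expression, in contrast to the (1 << n) single-bit flags around them. A hypothetical mask/extract helper pair for that convention (these macros are illustrative only, not part of the header):

	/* illustrative helpers -- not defined by this header */
	#define NV_FIELD_MASK(high, low) \
		((0xffffffffU >> (31 - (high) + (low))) << (low))
	#define NV_FIELD_GET(reg, high, low) \
		(((reg) & NV_FIELD_MASK(high, low)) >> (low))

	/* e.g. the 12-bit X scale factor, bits 11:0 of FP_DEBUG_1 */
	uint32_t xscale = NV_FIELD_GET(val, 11, 0);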
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index b5713eed..feb52ee 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -49,7 +49,7 @@
 	radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
 	rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
 	r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
-	r600_blit_kms.o radeon_pm.o
+	r600_blit_kms.o radeon_pm.o atombios_dp.o
 
 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
 
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index d67c425..6578d19 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -263,10 +263,10 @@
 	case ATOM_ARG_FB:
 		idx = U8(*ptr);
 		(*ptr)++;
+		val = gctx->scratch[((gctx->fb_base + idx) / 4)];
 		if (print)
 			DEBUG("FB[0x%02X]", idx);
-		printk(KERN_INFO "FB access is not implemented.\n");
-		return 0;
+		break;
 	case ATOM_ARG_IMM:
 		switch (align) {
 		case ATOM_SRC_DWORD:
@@ -488,9 +488,9 @@
 	case ATOM_ARG_FB:
 		idx = U8(*ptr);
 		(*ptr)++;
+		gctx->scratch[((gctx->fb_base + idx) / 4)] = val;
 		DEBUG("FB[0x%02X]", idx);
-		printk(KERN_INFO "FB access is not implemented.\n");
-		return;
+		break;
 	case ATOM_ARG_PLL:
 		idx = U8(*ptr);
 		(*ptr)++;
@@ -1214,3 +1214,28 @@
 		*crev = CU8(idx + 3);
 	return;
 }
+
+int atom_allocate_fb_scratch(struct atom_context *ctx)
+{
+	int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
+	uint16_t data_offset;
+	int usage_bytes;
+	struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
+
+	atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
+
+	firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
+
+	DRM_DEBUG("atom firmware requested %08x %dkb\n",
+		  firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
+		  firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
+
+	usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+	if (usage_bytes == 0)
+		usage_bytes = 20 * 1024;
+	/* allocate some scratch memory */
+	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
+	if (!ctx->scratch)
+		return -ENOMEM;
+	return 0;
+}
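
atom_allocate_fb_scratch() sizes ctx->scratch from the firmware's VRAM_UsageByFirmware table, falling back to 20 KiB when the table reports zero. A minimal caller sketch (the call site and error handling here are assumptions, not lifted from this patch):

	/* during ATOM context setup, before executing tables that
	 * touch the firmware framebuffer area */
	if (atom_allocate_fb_scratch(ctx)) {
		kfree(ctx);		/* hypothetical error path */
		return NULL;
	}
	...
	/* and on teardown */
	kfree(ctx->scratch);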
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index e6eb38f..6671848 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -132,6 +132,7 @@
 	uint8_t shift;
 	int cs_equal, cs_above;
 	int io_mode;
+	uint32_t *scratch;
 };
 
 extern int atom_debug;
@@ -142,6 +143,7 @@
 void atom_destroy(struct atom_context *);
 void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start);
 void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev);
+int atom_allocate_fb_scratch(struct atom_context *ctx);
 #include "atom-types.h"
 #include "atombios.h"
 #include "ObjectID.h"
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 6643afc..5f48515 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -2680,7 +2680,7 @@
 typedef struct _ATOM_HPD_INT_RECORD {
 	ATOM_COMMON_RECORD_HEADER sheader;
 	UCHAR ucHPDIntGPIOID;	/* Corresponding block in GPIO_PIN_INFO table gives the pin info */
-	UCHAR ucPluggged_PinState;
+	UCHAR ucPlugged_PinState;
 } ATOM_HPD_INT_RECORD;
 
 typedef struct _ATOM_OUTPUT_PROTECTION_RECORD {
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index c15287a..260fcf5 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -241,6 +241,7 @@
 {
 	struct drm_device *dev = crtc->dev;
 	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
@@ -248,20 +249,19 @@
 		if (ASIC_IS_DCE3(rdev))
 			atombios_enable_crtc_memreq(crtc, 1);
 		atombios_blank_crtc(crtc, 0);
+		drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+		radeon_crtc_load_lut(crtc);
 		break;
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
+		drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
 		atombios_blank_crtc(crtc, 1);
 		if (ASIC_IS_DCE3(rdev))
 			atombios_enable_crtc_memreq(crtc, 0);
 		atombios_enable_crtc(crtc, 0);
 		break;
 	}
-
-	if (mode != DRM_MODE_DPMS_OFF) {
-		radeon_crtc_load_lut(crtc);
-	}
 }
 
 static void
@@ -457,9 +457,8 @@
 				if (encoder->encoder_type !=
 				    DRM_MODE_ENCODER_DAC)
 					pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
-				if (!ASIC_IS_AVIVO(rdev)
-				    && (encoder->encoder_type ==
-					DRM_MODE_ENCODER_LVDS))
+				if (encoder->encoder_type ==
+					DRM_MODE_ENCODER_LVDS)
 					pll_flags |= RADEON_PLL_USE_REF_DIV;
 			}
 			radeon_encoder = to_radeon_encoder(encoder);
@@ -500,8 +499,18 @@
 	else
 		pll = &rdev->clock.p2pll;
 
-	radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
-			   &ref_div, &post_div, pll_flags);
+	if (ASIC_IS_AVIVO(rdev)) {
+		if (radeon_new_pll)
+			radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
+						 &fb_div, &frac_fb_div,
+						 &ref_div, &post_div, pll_flags);
+		else
+			radeon_compute_pll(pll, adjusted_clock, &pll_clock,
+					   &fb_div, &frac_fb_div,
+					   &ref_div, &post_div, pll_flags);
+	} else
+		radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
+				   &ref_div, &post_div, pll_flags);
 
 	index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
 	atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
@@ -574,21 +583,32 @@
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_framebuffer *radeon_fb;
 	struct drm_gem_object *obj;
-	struct drm_radeon_gem_object *obj_priv;
+	struct radeon_bo *rbo;
 	uint64_t fb_location;
 	uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+	int r;
 
-	if (!crtc->fb)
-		return -EINVAL;
+	/* no fb bound */
+	if (!crtc->fb) {
+		DRM_DEBUG("No FB bound\n");
+		return 0;
+	}
 
 	radeon_fb = to_radeon_framebuffer(crtc->fb);
 
+	/* Pin framebuffer & get tiling information */
 	obj = radeon_fb->obj;
-	obj_priv = obj->driver_private;
-
-	if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &fb_location)) {
+	rbo = obj->driver_private;
+	r = radeon_bo_reserve(rbo, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
+	if (unlikely(r != 0)) {
+		radeon_bo_unreserve(rbo);
 		return -EINVAL;
 	}
+	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+	radeon_bo_unreserve(rbo);
 
 	switch (crtc->fb->bits_per_pixel) {
 	case 8:
@@ -618,8 +638,6 @@
 		return -EINVAL;
 	}
 
-	radeon_object_get_tiling_flags(obj->driver_private,
-				       &tiling_flags, NULL);
 	if (tiling_flags & RADEON_TILING_MACRO)
 		fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
 
@@ -674,7 +692,12 @@
 
 	if (old_fb && old_fb != crtc->fb) {
 		radeon_fb = to_radeon_framebuffer(old_fb);
-		radeon_gem_object_unpin(radeon_fb->obj);
+		rbo = radeon_fb->obj->driver_private;
+		r = radeon_bo_reserve(rbo, false);
+		if (unlikely(r != 0))
+			return r;
+		radeon_bo_unpin(rbo);
+		radeon_bo_unreserve(rbo);
 	}
 
 	/* Bytes per pixel may have changed */
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
new file mode 100644
index 0000000..0d63c44
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -0,0 +1,790 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+
+#include "atom.h"
+#include "atom-bits.h"
+#include "drm_dp_helper.h"
+
+/* move these to drm_dp_helper.c/h */
+#define DP_LINK_CONFIGURATION_SIZE	9
+#define DP_LINK_STATUS_SIZE		6
+#define DP_DPCD_SIZE			8
+
+static const char *voltage_names[] = {
+	"0.4V", "0.6V", "0.8V", "1.2V"
+};
+static const char *pre_emph_names[] = {
+	"0dB", "3.5dB", "6dB", "9.5dB"
+};
+
+static const int dp_clocks[] = {
+	54000,  /* 1 lane, 1.62 GHz */
+	90000,  /* 1 lane, 2.70 GHz */
+	108000, /* 2 lane, 1.62 GHz */
+	180000, /* 2 lane, 2.70 GHz */
+	216000, /* 4 lane, 1.62 GHz */
+	360000, /* 4 lane, 2.70 GHz */
+};
+
+static const int num_dp_clocks = ARRAY_SIZE(dp_clocks);
+
+/* common helper functions */
+static int dp_lanes_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
+{
+	int i;
+	u8 max_link_bw;
+	u8 max_lane_count;
+
+	if (!dpcd)
+		return 0;
+
+	max_link_bw = dpcd[DP_MAX_LINK_RATE];
+	max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+
+	switch (max_link_bw) {
+	case DP_LINK_BW_1_62:
+	default:
+		for (i = 0; i < num_dp_clocks; i++) {
+			if (i % 2)
+				continue;
+			switch (max_lane_count) {
+			case 1:
+				if (i > 1)
+					return 0;
+				break;
+			case 2:
+				if (i > 3)
+					return 0;
+				break;
+			case 4:
+			default:
+				break;
+			}
+			if (dp_clocks[i] > mode_clock) {
+				if (i < 2)
+					return 1;
+				else if (i < 4)
+					return 2;
+				else
+					return 4;
+			}
+		}
+		break;
+	case DP_LINK_BW_2_7:
+		for (i = 0; i < num_dp_clocks; i++) {
+			switch (max_lane_count) {
+			case 1:
+				if (i > 1)
+					return 0;
+				break;
+			case 2:
+				if (i > 3)
+					return 0;
+				break;
+			case 4:
+			default:
+				break;
+			}
+			if (dp_clocks[i] > mode_clock) {
+				if (i < 2)
+					return 1;
+				else if (i < 4)
+					return 2;
+				else
+					return 4;
+			}
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static int dp_link_clock_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
+{
+	int i;
+	u8 max_link_bw;
+	u8 max_lane_count;
+
+	if (!dpcd)
+		return 0;
+
+	max_link_bw = dpcd[DP_MAX_LINK_RATE];
+	max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+
+	switch (max_link_bw) {
+	case DP_LINK_BW_1_62:
+	default:
+		for (i = 0; i < num_dp_clocks; i++) {
+			if (i % 2)
+				continue;
+			switch (max_lane_count) {
+			case 1:
+				if (i > 1)
+					return 0;
+				break;
+			case 2:
+				if (i > 3)
+					return 0;
+				break;
+			case 4:
+			default:
+				break;
+			}
+			if (dp_clocks[i] > mode_clock)
+				return 162000;
+		}
+		break;
+	case DP_LINK_BW_2_7:
+		for (i = 0; i < num_dp_clocks; i++) {
+			switch (max_lane_count) {
+			case 1:
+				if (i > 1)
+					return 0;
+				break;
+			case 2:
+				if (i > 3)
+					return 0;
+				break;
+			case 4:
+			default:
+				break;
+			}
+			if (dp_clocks[i] > mode_clock)
+				return (i % 2) ? 270000 : 162000;
+		}
+	}
+
+	return 0;
+}
+
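The dp_clocks[] values work out to the highest 24 bpp pixel clock, in kHz, that each lane-count/link-rate pair can carry (1.62 Gbps per lane x 8b/10b coding / 24 bits per pixel = 54 Mpix/s). A worked example against a 4-lane, 2.7 Gbps-capable sink:

	/* 1920x1080@60 has mode->clock == 148500 (kHz) */
	lanes = dp_lanes_for_mode_clock(dpcd, 148500);
	/* first table entry above 148500 is dp_clocks[3] == 180000,
	 * and 2 <= i < 4, so this returns 2 lanes */
	bw = dp_link_clock_for_mode_clock(dpcd, 148500);
	/* i == 3 is odd, so this returns 270000 (a 2.7 GHz link) */
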
+int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
+{
+	int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock);
+	int bw = dp_link_clock_for_mode_clock(dpcd, mode_clock);
+
+	if ((lanes == 0) || (bw == 0))
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+{
+	return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+			     int lane)
+{
+	int i = DP_LANE0_1_STATUS + (lane >> 1);
+	int s = (lane & 1) * 4;
+	u8 l = dp_link_status(link_status, i);
+	return (l >> s) & 0xf;
+}
+
+static bool dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+				 int lane_count)
+{
+	int lane;
+	u8 lane_status;
+
+	for (lane = 0; lane < lane_count; lane++) {
+		lane_status = dp_get_lane_status(link_status, lane);
+		if ((lane_status & DP_LANE_CR_DONE) == 0)
+			return false;
+	}
+	return true;
+}
+
+static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+			     int lane_count)
+{
+	u8 lane_align;
+	u8 lane_status;
+	int lane;
+
+	lane_align = dp_link_status(link_status,
+				    DP_LANE_ALIGN_STATUS_UPDATED);
+	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+		return false;
+	for (lane = 0; lane < lane_count; lane++) {
+		lane_status = dp_get_lane_status(link_status, lane);
+		if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
+			return false;
+	}
+	return true;
+}
+
+static u8 dp_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
+					int lane)
+
+{
+	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+	int s = ((lane & 1) ?
+		 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+		 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+	u8 l = dp_link_status(link_status, i);
+
+	return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+
+static u8 dp_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
+					     int lane)
+{
+	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+	int s = ((lane & 1) ?
+		 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+		 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+	u8 l = dp_link_status(link_status, i);
+
+	return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+
+/* XXX fix me -- chip specific */
+#define DP_VOLTAGE_MAX         DP_TRAIN_VOLTAGE_SWING_1200
+static u8 dp_pre_emphasis_max(u8 voltage_swing)
+{
+	switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+	case DP_TRAIN_VOLTAGE_SWING_400:
+		return DP_TRAIN_PRE_EMPHASIS_6;
+	case DP_TRAIN_VOLTAGE_SWING_600:
+		return DP_TRAIN_PRE_EMPHASIS_6;
+	case DP_TRAIN_VOLTAGE_SWING_800:
+		return DP_TRAIN_PRE_EMPHASIS_3_5;
+	case DP_TRAIN_VOLTAGE_SWING_1200:
+	default:
+		return DP_TRAIN_PRE_EMPHASIS_0;
+	}
+}
+
+static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
+				int lane_count,
+				u8 train_set[4])
+{
+	u8 v = 0;
+	u8 p = 0;
+	int lane;
+
+	for (lane = 0; lane < lane_count; lane++) {
+		u8 this_v = dp_get_adjust_request_voltage(link_status, lane);
+		u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane);
+
+		DRM_DEBUG("requested signal parameters: lane %d voltage %s pre_emph %s\n",
+			  lane,
+			  voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
+			  pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
+
+		if (this_v > v)
+			v = this_v;
+		if (this_p > p)
+			p = this_p;
+	}
+
+	if (v >= DP_VOLTAGE_MAX)
+		v = DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
+
+	if (p >= dp_pre_emphasis_max(v))
+		p = dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+	DRM_DEBUG("using signal parameters: voltage %s pre_emph %s\n",
+		  voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
+		  pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
+
+	for (lane = 0; lane < 4; lane++)
+		train_set[lane] = v | p;
+}
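
dp_get_adjust_train() takes the strongest request seen on any lane and then clamps it: if lane 0 asks for 0.6V/3.5dB and lane 1 for 0.8V/0dB, all four train_set entries end up at 0.8V/3.5dB, and because 3.5dB is dp_pre_emphasis_max() for 0.8V swing, DP_TRAIN_MAX_PRE_EMPHASIS_REACHED is set as well, telling the sink not to ask for more.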
+
+/* radeon aux chan functions */
+bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
+			   int num_bytes, u8 *read_byte,
+			   u8 read_buf_len, u8 delay)
+{
+	struct drm_device *dev = chan->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
+	unsigned char *base;
+
+	memset(&args, 0, sizeof(args));
+
+	base = (unsigned char *)rdev->mode_info.atom_context->scratch;
+
+	memcpy(base, req_bytes, num_bytes);
+
+	args.lpAuxRequest = 0;
+	args.lpDataOut = 16;
+	args.ucDataOutLen = 0;
+	args.ucChannelID = chan->rec.i2c_id;
+	args.ucDelay = delay / 10;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	if (args.ucReplyStatus) {
+		DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n",
+			  req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
+			  chan->rec.i2c_id, args.ucReplyStatus);
+		return false;
+	}
+
+	if (args.ucDataOutLen && read_byte && read_buf_len) {
+		if (read_buf_len < args.ucDataOutLen) {
+			DRM_ERROR("Buffer to small for return answer %d %d\n",
+				  read_buf_len, args.ucDataOutLen);
+			return false;
+		}
+		{
+			int len = min(read_buf_len, args.ucDataOutLen);
+			memcpy(read_byte, base + 16, len);
+		}
+	}
+	return true;
+}
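
radeon_process_aux_ch() drives the transaction through the ATOM FB scratch buffer allocated by atom_allocate_fb_scratch() earlier in this series: the request bytes are copied to offset 0 (args.lpAuxRequest == 0) and the AtomBIOS table deposits any reply at offset 16 (args.lpDataOut == 16). The layout during a transfer, as this code uses it:

	base + 0  ... base + num_bytes - 1            AUX request (header + payload)
	base + 16 ... base + 16 + ucDataOutLen - 1    AUX reply data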
+
+bool radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, uint16_t address,
+				uint8_t send_bytes, uint8_t *send)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	u8 msg[20];
+	u8 msg_len, dp_msg_len;
+	bool ret;
+
+	dp_msg_len = 4;
+	msg[0] = address;
+	msg[1] = address >> 8;
+	msg[2] = AUX_NATIVE_WRITE << 4;
+	dp_msg_len += send_bytes;
+	msg[3] = (dp_msg_len << 4) | (send_bytes - 1);
+
+	if (send_bytes > 16)
+		return false;
+
+	memcpy(&msg[4], send, send_bytes);
+	msg_len = 4 + send_bytes;
+	ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, NULL, 0, 0);
+	return ret;
+}
+
+bool radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, uint16_t address,
+			       uint8_t delay, uint8_t expected_bytes,
+			       uint8_t *read_p)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	u8 msg[20];
+	u8 msg_len, dp_msg_len;
+	bool ret = false;
+	msg_len = 4;
+	dp_msg_len = 4;
+	msg[0] = address;
+	msg[1] = address >> 8;
+	msg[2] = AUX_NATIVE_READ << 4;
+	msg[3] = (dp_msg_len) << 4;
+	msg[3] |= expected_bytes - 1;
+
+	ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, read_p, expected_bytes, delay);
+	return ret;
+}
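
Both native helpers build the same 4-byte AUX header: the 16-bit address low byte first, the command nibble in the top of msg[2], and msg[3] carrying the total message length in its high nibble plus payload-size-minus-one in its low nibble. For the 8-byte DPCD read that radeon_dp_getdpcd() below issues at address 0x000 (AUX_NATIVE_READ is 0x9 in drm_dp_helper.h), the header works out to:

	msg[0] = 0x00;	/* DP_DPCD_REV & 0xff */
	msg[1] = 0x00;	/* DP_DPCD_REV >> 8 */
	msg[2] = 0x90;	/* AUX_NATIVE_READ << 4 */
	msg[3] = 0x47;	/* (dp_msg_len == 4) << 4 | (8 bytes - 1) */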
+
+/* radeon dp functions */
+static u8 radeon_dp_encoder_service(struct radeon_device *rdev, int action, int dp_clock,
+				    uint8_t ucconfig, uint8_t lane_num)
+{
+	DP_ENCODER_SERVICE_PARAMETERS args;
+	int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
+
+	memset(&args, 0, sizeof(args));
+	args.ucLinkClock = dp_clock / 10;
+	args.ucConfig = ucconfig;
+	args.ucAction = action;
+	args.ucLaneNum = lane_num;
+	args.ucStatus = 0;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+	return args.ucStatus;
+}
+
+u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	struct drm_device *dev = radeon_connector->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
+					 dig_connector->dp_i2c_bus->rec.i2c_id, 0);
+}
+
+bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	u8 msg[25];
+	int ret;
+
+	ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, 0, 8, msg);
+	if (ret) {
+		memcpy(dig_connector->dpcd, msg, 8);
+		{
+			int i;
+			DRM_DEBUG("DPCD: ");
+			for (i = 0; i < 8; i++)
+				DRM_DEBUG("%02x ", msg[i]);
+			DRM_DEBUG("\n");
+		}
+		return true;
+	}
+	dig_connector->dpcd[0] = 0;
+	return false;
+}
+
+void radeon_dp_set_link_config(struct drm_connector *connector,
+			       struct drm_display_mode *mode)
+{
+	struct radeon_connector *radeon_connector;
+	struct radeon_connector_atom_dig *dig_connector;
+
+	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+		return;
+
+	radeon_connector = to_radeon_connector(connector);
+	if (!radeon_connector->con_priv)
+		return;
+	dig_connector = radeon_connector->con_priv;
+
+	dig_connector->dp_clock =
+		dp_link_clock_for_mode_clock(dig_connector->dpcd, mode->clock);
+	dig_connector->dp_lane_count =
+		dp_lanes_for_mode_clock(dig_connector->dpcd, mode->clock);
+}
+
+int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
+				struct drm_display_mode *mode)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+
+	return dp_mode_valid(dig_connector->dpcd, mode->clock);
+}
+
+static bool atom_dp_get_link_status(struct radeon_connector *radeon_connector,
+				    u8 link_status[DP_LINK_STATUS_SIZE])
+{
+	int ret;
+	ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS, 100,
+					DP_LINK_STATUS_SIZE, link_status);
+	if (!ret) {
+		DRM_ERROR("displayport link status failed\n");
+		return false;
+	}
+
+	DRM_DEBUG("link status %02x %02x %02x %02x %02x %02x\n",
+		  link_status[0], link_status[1], link_status[2],
+		  link_status[3], link_status[4], link_status[5]);
+	return true;
+}
+
+bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	u8 link_status[DP_LINK_STATUS_SIZE];
+
+	if (!atom_dp_get_link_status(radeon_connector, link_status))
+		return false;
+	if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count))
+		return false;
+	return true;
+}
+
+static void dp_set_power(struct radeon_connector *radeon_connector, u8 power_state)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+
+	if (dig_connector->dpcd[0] >= 0x11) {
+		radeon_dp_aux_native_write(radeon_connector, DP_SET_POWER, 1,
+					   &power_state);
+	}
+}
+
+static void dp_set_downspread(struct radeon_connector *radeon_connector, u8 downspread)
+{
+	radeon_dp_aux_native_write(radeon_connector, DP_DOWNSPREAD_CTRL, 1,
+				   &downspread);
+}
+
+static void dp_set_link_bw_lanes(struct radeon_connector *radeon_connector,
+				 u8 link_configuration[DP_LINK_CONFIGURATION_SIZE])
+{
+	radeon_dp_aux_native_write(radeon_connector, DP_LINK_BW_SET, 2,
+				   link_configuration);
+}
+
+static void dp_update_dpvs_emph(struct radeon_connector *radeon_connector,
+				struct drm_encoder *encoder,
+				u8 train_set[4])
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	int i;
+
+	for (i = 0; i < dig_connector->dp_lane_count; i++)
+		atombios_dig_transmitter_setup(encoder,
+					       ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
+					       i, train_set[i]);
+
+	radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_LANE0_SET,
+				   dig_connector->dp_lane_count, train_set);
+}
+
+static void dp_set_training(struct radeon_connector *radeon_connector,
+			    u8 training)
+{
+	radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_PATTERN_SET,
+				   1, &training);
+}
+
+void dp_link_train(struct drm_encoder *encoder,
+		   struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig;
+	struct radeon_connector *radeon_connector;
+	struct radeon_connector_atom_dig *dig_connector;
+	int enc_id = 0;
+	bool clock_recovery, channel_eq;
+	u8 link_status[DP_LINK_STATUS_SIZE];
+	u8 link_configuration[DP_LINK_CONFIGURATION_SIZE];
+	u8 tries, voltage;
+	u8 train_set[4];
+	int i;
+
+	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+		return;
+
+	if (!radeon_encoder->enc_priv)
+		return;
+	dig = radeon_encoder->enc_priv;
+
+	radeon_connector = to_radeon_connector(connector);
+	if (!radeon_connector->con_priv)
+		return;
+	dig_connector = radeon_connector->con_priv;
+
+	if (ASIC_IS_DCE32(rdev)) {
+		if (dig->dig_block)
+			enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
+		else
+			enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
+		if (dig_connector->linkb)
+			enc_id |= ATOM_DP_CONFIG_LINK_B;
+		else
+			enc_id |= ATOM_DP_CONFIG_LINK_A;
+	} else {
+		if (dig_connector->linkb)
+			enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER | ATOM_DP_CONFIG_LINK_B;
+		else
+			enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER | ATOM_DP_CONFIG_LINK_A;
+	}
+
+	memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+	if (dig_connector->dp_clock == 270000)
+		link_configuration[0] = DP_LINK_BW_2_7;
+	else
+		link_configuration[0] = DP_LINK_BW_1_62;
+	link_configuration[1] = dig_connector->dp_lane_count;
+	if (dig_connector->dpcd[0] >= 0x11)
+		link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+	/* power up the sink */
+	dp_set_power(radeon_connector, DP_SET_POWER_D0);
+	/* disable the training pattern on the sink */
+	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
+	/* set link bw and lanes on the sink */
+	dp_set_link_bw_lanes(radeon_connector, link_configuration);
+	/* disable downspread on the sink */
+	dp_set_downspread(radeon_connector, 0);
+	/* start training on the source */
+	radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
+				  dig_connector->dp_clock, enc_id, 0);
+	/* set training pattern 1 on the source */
+	radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+				  dig_connector->dp_clock, enc_id, 0);
+
+	/* set initial vs/emph */
+	memset(train_set, 0, 4);
+	udelay(400);
+	/* set training pattern 1 on the sink */
+	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_1);
+
+	dp_update_dpvs_emph(radeon_connector, encoder, train_set);
+
+	/* clock recovery loop */
+	clock_recovery = false;
+	tries = 0;
+	voltage = 0xff;
+	for (;;) {
+		udelay(100);
+		if (!atom_dp_get_link_status(radeon_connector, link_status))
+			break;
+
+		if (dp_clock_recovery_ok(link_status, dig_connector->dp_lane_count)) {
+			clock_recovery = true;
+			break;
+		}
+
+		for (i = 0; i < dig_connector->dp_lane_count; i++) {
+			if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+				break;
+		}
+		if (i == dig_connector->dp_lane_count) {
+			DRM_ERROR("clock recovery reached max voltage\n");
+			break;
+		}
+
+		if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+			++tries;
+			if (tries == 5) {
+				DRM_ERROR("clock recovery tried 5 times\n");
+				break;
+			}
+		} else
+			tries = 0;
+
+		voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+		/* Compute new train_set as requested by sink */
+		dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
+		dp_update_dpvs_emph(radeon_connector, encoder, train_set);
+	}
+	if (!clock_recovery)
+		DRM_ERROR("clock recovery failed\n");
+	else
+		DRM_DEBUG("clock recovery at voltage %d pre-emphasis %d\n",
+			  train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
+			  (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+			  DP_TRAIN_PRE_EMPHASIS_SHIFT);
+
+	/* set training pattern 2 on the sink */
+	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2);
+	/* set training pattern 2 on the source */
+	radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+				  dig_connector->dp_clock, enc_id, 1);
+
+	/* channel equalization loop */
+	tries = 0;
+	channel_eq = false;
+	for (;;) {
+		udelay(400);
+		if (!atom_dp_get_link_status(radeon_connector, link_status))
+			break;
+
+		if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count)) {
+			channel_eq = true;
+			break;
+		}
+
+		/* Try 5 times */
+		if (tries > 5) {
+			DRM_ERROR("channel eq failed: 5 tries\n");
+			break;
+		}
+
+		/* Compute new train_set as requested by sink */
+		dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
+		dp_update_dpvs_emph(radeon_connector, encoder, train_set);
+
+		tries++;
+	}
+
+	if (!channel_eq)
+		DRM_ERROR("channel eq failed\n");
+	else
+		DRM_DEBUG("channel eq at voltage %d pre-emphasis %d\n",
+			  train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
+			  (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
+			  >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
+
+	/* disable the training pattern on the sink */
+	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
+
+	radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
+				  dig_connector->dp_clock, enc_id, 0);
+}
+
+int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+			 uint8_t write_byte, uint8_t *read_byte)
+{
+	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+	struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter;
+	int ret = 0;
+	uint16_t address = algo_data->address;
+	uint8_t msg[5];
+	uint8_t reply[2];
+	int msg_len, dp_msg_len;
+	int reply_bytes;
+
+	/* Set up the command byte */
+	if (mode & MODE_I2C_READ)
+		msg[2] = AUX_I2C_READ << 4;
+	else
+		msg[2] = AUX_I2C_WRITE << 4;
+
+	if (!(mode & MODE_I2C_STOP))
+		msg[2] |= AUX_I2C_MOT << 4;
+
+	msg[0] = address;
+	msg[1] = address >> 8;
+
+	reply_bytes = 1;
+
+	msg_len = 4;
+	dp_msg_len = 3;
+	switch (mode) {
+	case MODE_I2C_WRITE:
+		msg[4] = write_byte;
+		msg_len++;
+		dp_msg_len += 2;
+		break;
+	case MODE_I2C_READ:
+		dp_msg_len += 1;
+		break;
+	default:
+		break;
+	}
+
+	msg[3] = (dp_msg_len) << 4;
+	ret = radeon_process_aux_ch(auxch, msg, msg_len, reply, reply_bytes, 0);
+
+	if (ret) {
+		if (read_byte)
+			*read_byte = reply[0];
+		return reply_bytes;
+	}
+	return -EREMOTEIO;
+}
+
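radeon_dp_i2c_aux_ch() implements the single-byte I2C-over-AUX primitive expected by the shared i2c-algo-dp-aux code; it is presumably installed where the DP i2c bus is created, roughly as follows (a sketch under that assumption -- the field names are not taken from this patch):

	/* hook the radeon AUX backend into the common DP i2c algorithm */
	dig_connector->dp_i2c_bus->algo.aux_ch = radeon_dp_i2c_aux_ch;
	ret = i2c_dp_aux_add_bus(&dig_connector->dp_i2c_bus->adapter);
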
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index c9e93ea..824cc64 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -65,6 +65,95 @@
  * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
  */
 
+/* hpd for digital panel detect/disconnect */
+bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+	bool connected = false;
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
+			connected = true;
+		break;
+	case RADEON_HPD_2:
+		if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
+			connected = true;
+		break;
+	default:
+		break;
+	}
+	return connected;
+}
+
+void r100_hpd_set_polarity(struct radeon_device *rdev,
+			   enum radeon_hpd_id hpd)
+{
+	u32 tmp;
+	bool connected = r100_hpd_sense(rdev, hpd);
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		tmp = RREG32(RADEON_FP_GEN_CNTL);
+		if (connected)
+			tmp &= ~RADEON_FP_DETECT_INT_POL;
+		else
+			tmp |= RADEON_FP_DETECT_INT_POL;
+		WREG32(RADEON_FP_GEN_CNTL, tmp);
+		break;
+	case RADEON_HPD_2:
+		tmp = RREG32(RADEON_FP2_GEN_CNTL);
+		if (connected)
+			tmp &= ~RADEON_FP2_DETECT_INT_POL;
+		else
+			tmp |= RADEON_FP2_DETECT_INT_POL;
+		WREG32(RADEON_FP2_GEN_CNTL, tmp);
+		break;
+	default:
+		break;
+	}
+}
+
+void r100_hpd_init(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		switch (radeon_connector->hpd.hpd) {
+		case RADEON_HPD_1:
+			rdev->irq.hpd[0] = true;
+			break;
+		case RADEON_HPD_2:
+			rdev->irq.hpd[1] = true;
+			break;
+		default:
+			break;
+		}
+	}
+	r100_irq_set(rdev);
+}
+
+void r100_hpd_fini(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		switch (radeon_connector->hpd.hpd) {
+		case RADEON_HPD_1:
+			rdev->irq.hpd[0] = false;
+			break;
+		case RADEON_HPD_2:
+			rdev->irq.hpd[1] = false;
+			break;
+		default:
+			break;
+		}
+	}
+}
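
The r100_hpd_*() quartet mirrors the r600 set added further down and is presumably exported through the per-ASIC function table; a sketch of the expected radeon_asic entries (names assumed to match the new hooks):

	.hpd_init = &r100_hpd_init,
	.hpd_fini = &r100_hpd_fini,
	.hpd_sense = &r100_hpd_sense,
	.hpd_set_polarity = &r100_hpd_set_polarity,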
+
 /*
  * PCI GART
  */
@@ -94,6 +183,15 @@
 	return radeon_gart_table_ram_alloc(rdev);
 }
 
+/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
+void r100_enable_bm(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+	/* Enable bus mastering */
+	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+	WREG32(RADEON_BUS_CNTL, tmp);
+}
+
 int r100_pci_gart_enable(struct radeon_device *rdev)
 {
 	uint32_t tmp;
@@ -105,9 +203,6 @@
 	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
 	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
 	WREG32(RADEON_AIC_HI_ADDR, tmp);
-	/* Enable bus mastering */
-	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
-	WREG32(RADEON_BUS_CNTL, tmp);
 	/* set PCI GART page-table base address */
 	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
 	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
@@ -157,6 +252,12 @@
 	if (rdev->irq.crtc_vblank_int[1]) {
 		tmp |= RADEON_CRTC2_VBLANK_MASK;
 	}
+	if (rdev->irq.hpd[0]) {
+		tmp |= RADEON_FP_DETECT_MASK;
+	}
+	if (rdev->irq.hpd[1]) {
+		tmp |= RADEON_FP2_DETECT_MASK;
+	}
 	WREG32(RADEON_GEN_INT_CNTL, tmp);
 	return 0;
 }
@@ -175,8 +276,9 @@
 static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
 {
 	uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
-	uint32_t irq_mask = RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT |
-		RADEON_CRTC2_VBLANK_STAT;
+	uint32_t irq_mask = RADEON_SW_INT_TEST |
+		RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
+		RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
 
 	if (irqs) {
 		WREG32(RADEON_GEN_INT_STATUS, irqs);
@@ -187,6 +289,7 @@
 int r100_irq_process(struct radeon_device *rdev)
 {
 	uint32_t status, msi_rearm;
+	bool queue_hotplug = false;
 
 	status = r100_irq_ack(rdev);
 	if (!status) {
@@ -207,8 +310,18 @@
 		if (status & RADEON_CRTC2_VBLANK_STAT) {
 			drm_handle_vblank(rdev->ddev, 1);
 		}
+		if (status & RADEON_FP_DETECT_STAT) {
+			queue_hotplug = true;
+			DRM_DEBUG("HPD1\n");
+		}
+		if (status & RADEON_FP2_DETECT_STAT) {
+			queue_hotplug = true;
+			DRM_DEBUG("HPD2\n");
+		}
 		status = r100_irq_ack(rdev);
 	}
+	if (queue_hotplug)
+		queue_work(rdev->wq, &rdev->hotplug_work);
 	if (rdev->msi_enabled) {
 		switch (rdev->family) {
 		case CHIP_RS400:
@@ -255,24 +368,27 @@
 	int r;
 
 	if (rdev->wb.wb_obj == NULL) {
-		r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
-					 true,
-					 RADEON_GEM_DOMAIN_GTT,
-					 false, &rdev->wb.wb_obj);
+		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
+					RADEON_GEM_DOMAIN_GTT,
+					&rdev->wb.wb_obj);
 		if (r) {
-			DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
+			dev_err(rdev->dev, "(%d) create WB buffer failed\n", r);
 			return r;
 		}
-		r = radeon_object_pin(rdev->wb.wb_obj,
-				      RADEON_GEM_DOMAIN_GTT,
-				      &rdev->wb.gpu_addr);
+		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+		if (unlikely(r != 0))
+			return r;
+		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+					&rdev->wb.gpu_addr);
 		if (r) {
-			DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
+			dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r);
+			radeon_bo_unreserve(rdev->wb.wb_obj);
 			return r;
 		}
-		r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+		radeon_bo_unreserve(rdev->wb.wb_obj);
 		if (r) {
-			DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
+			dev_err(rdev->dev, "(%d) map WB buffer failed\n", r);
 			return r;
 		}
 	}
@@ -290,11 +406,19 @@
 
 void r100_wb_fini(struct radeon_device *rdev)
 {
+	int r;
+
 	r100_wb_disable(rdev);
 	if (rdev->wb.wb_obj) {
-		radeon_object_kunmap(rdev->wb.wb_obj);
-		radeon_object_unpin(rdev->wb.wb_obj);
-		radeon_object_unref(&rdev->wb.wb_obj);
+		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+		if (unlikely(r != 0)) {
+			dev_err(rdev->dev, "(%d) can't finish WB\n", r);
+			return;
+		}
+		radeon_bo_kunmap(rdev->wb.wb_obj);
+		radeon_bo_unpin(rdev->wb.wb_obj);
+		radeon_bo_unreserve(rdev->wb.wb_obj);
+		radeon_bo_unref(&rdev->wb.wb_obj);
 		rdev->wb.wb = NULL;
 		rdev->wb.wb_obj = NULL;
 	}
@@ -1288,17 +1412,17 @@
 
 int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
 					 struct radeon_cs_packet *pkt,
-					 struct radeon_object *robj)
+					 struct radeon_bo *robj)
 {
 	unsigned idx;
 	u32 value;
 	idx = pkt->idx + 1;
 	value = radeon_get_ib_value(p, idx + 2);
-	if ((value + 1) > radeon_object_size(robj)) {
+	if ((value + 1) > radeon_bo_size(robj)) {
 		DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
 			  "(need %u have %lu) !\n",
 			  value + 1,
-			  radeon_object_size(robj));
+			  radeon_bo_size(robj));
 		return -EINVAL;
 	}
 	return 0;
@@ -1583,6 +1707,14 @@
 	r100_hdp_reset(rdev);
 }
 
+void r100_hdp_flush(struct radeon_device *rdev)
+{
+	u32 tmp;
+	tmp = RREG32(RADEON_HOST_PATH_CNTL);
+	tmp |= RADEON_HDP_READ_BUFFER_INVALIDATE;
+	WREG32(RADEON_HOST_PATH_CNTL, tmp);
+}
+
 void r100_hdp_reset(struct radeon_device *rdev)
 {
 	uint32_t tmp;
@@ -1650,6 +1782,17 @@
 	return 0;
 }
 
+void r100_set_common_regs(struct radeon_device *rdev)
+{
+	/* set these so they don't interfere with anything */
+	WREG32(RADEON_OV0_SCALE_CNTL, 0);
+	WREG32(RADEON_SUBPIC_CNTL, 0);
+	WREG32(RADEON_VIPH_CONTROL, 0);
+	WREG32(RADEON_I2C_CNTL_1, 0);
+	WREG32(RADEON_DVI_I2C_CNTL_1, 0);
+	WREG32(RADEON_CAP0_TRIG_CNTL, 0);
+	WREG32(RADEON_CAP1_TRIG_CNTL, 0);
+}
 
 /*
  * VRAM info
@@ -2594,7 +2737,7 @@
 			      struct r100_cs_track *track, unsigned idx)
 {
 	unsigned face, w, h;
-	struct radeon_object *cube_robj;
+	struct radeon_bo *cube_robj;
 	unsigned long size;
 
 	for (face = 0; face < 5; face++) {
@@ -2607,9 +2750,9 @@
 
 		size += track->textures[idx].cube_info[face].offset;
 
-		if (size > radeon_object_size(cube_robj)) {
+		if (size > radeon_bo_size(cube_robj)) {
 			DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
-				  size, radeon_object_size(cube_robj));
+				  size, radeon_bo_size(cube_robj));
 			r100_cs_track_texture_print(&track->textures[idx]);
 			return -1;
 		}
@@ -2620,7 +2763,7 @@
 static int r100_cs_track_texture_check(struct radeon_device *rdev,
 				       struct r100_cs_track *track)
 {
-	struct radeon_object *robj;
+	struct radeon_bo *robj;
 	unsigned long size;
 	unsigned u, i, w, h;
 	int ret;
@@ -2676,9 +2819,9 @@
 				  "%u\n", track->textures[u].tex_coord_type, u);
 			return -EINVAL;
 		}
-		if (size > radeon_object_size(robj)) {
+		if (size > radeon_bo_size(robj)) {
 			DRM_ERROR("Texture of unit %u needs %lu bytes but is "
-				  "%lu\n", u, size, radeon_object_size(robj));
+				  "%lu\n", u, size, radeon_bo_size(robj));
 			r100_cs_track_texture_print(&track->textures[u]);
 			return -EINVAL;
 		}
@@ -2700,10 +2843,10 @@
 		}
 		size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
 		size += track->cb[i].offset;
-		if (size > radeon_object_size(track->cb[i].robj)) {
+		if (size > radeon_bo_size(track->cb[i].robj)) {
 			DRM_ERROR("[drm] Buffer too small for color buffer %d "
 				  "(need %lu have %lu) !\n", i, size,
-				  radeon_object_size(track->cb[i].robj));
+				  radeon_bo_size(track->cb[i].robj));
 			DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
 				  i, track->cb[i].pitch, track->cb[i].cpp,
 				  track->cb[i].offset, track->maxy);
@@ -2717,10 +2860,10 @@
 		}
 		size = track->zb.pitch * track->zb.cpp * track->maxy;
 		size += track->zb.offset;
-		if (size > radeon_object_size(track->zb.robj)) {
+		if (size > radeon_bo_size(track->zb.robj)) {
 			DRM_ERROR("[drm] Buffer too small for z buffer "
 				  "(need %lu have %lu) !\n", size,
-				  radeon_object_size(track->zb.robj));
+				  radeon_bo_size(track->zb.robj));
 			DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
 				  track->zb.pitch, track->zb.cpp,
 				  track->zb.offset, track->maxy);
@@ -2738,11 +2881,12 @@
 					  "bound\n", prim_walk, i);
 				return -EINVAL;
 			}
-			if (size > radeon_object_size(track->arrays[i].robj)) {
-				DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
-					   "have %lu dwords\n", prim_walk, i,
-					   size >> 2,
-					   radeon_object_size(track->arrays[i].robj) >> 2);
+			if (size > radeon_bo_size(track->arrays[i].robj)) {
+				dev_err(rdev->dev, "(PW %u) Vertex array %u "
+					"need %lu dwords have %lu dwords\n",
+					prim_walk, i, size >> 2,
+					radeon_bo_size(track->arrays[i].robj)
+					>> 2);
 				DRM_ERROR("Max indices %u\n", track->max_indx);
 				return -EINVAL;
 			}
@@ -2756,10 +2900,12 @@
 					  "bound\n", prim_walk, i);
 				return -EINVAL;
 			}
-			if (size > radeon_object_size(track->arrays[i].robj)) {
-				DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
-					   "have %lu dwords\n", prim_walk, i, size >> 2,
-					   radeon_object_size(track->arrays[i].robj) >> 2);
+			if (size > radeon_bo_size(track->arrays[i].robj)) {
+				dev_err(rdev->dev, "(PW %u) Vertex array %u "
+					"need %lu dwords have %lu dwords\n",
+					prim_walk, i, size >> 2,
+					radeon_bo_size(track->arrays[i].robj)
+					>> 2);
 				return -EINVAL;
 			}
 		}
@@ -3101,6 +3247,9 @@
 {
 	int r;
 
+	/* set common regs */
+	r100_set_common_regs(rdev);
+	/* program mc */
 	r100_mc_program(rdev);
 	/* Resume clock */
 	r100_clock_startup(rdev);
@@ -3108,13 +3257,13 @@
 	r100_gpu_init(rdev);
 	/* Initialize GART (initialize after TTM so we can allocate
 	 * memory through TTM but finalize after TTM) */
+	r100_enable_bm(rdev);
 	if (rdev->flags & RADEON_IS_PCI) {
 		r = r100_pci_gart_enable(rdev);
 		if (r)
 			return r;
 	}
 	/* Enable IRQ */
-	rdev->irq.sw_int = true;
 	r100_irq_set(rdev);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
@@ -3150,6 +3299,8 @@
 	radeon_combios_asic_init(rdev->ddev);
 	/* Resume clock after posting */
 	r100_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return r100_startup(rdev);
 }
 
@@ -3174,7 +3325,7 @@
 		r100_pci_gart_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
@@ -3242,10 +3393,8 @@
 			RREG32(R_0007C0_CP_STAT));
 	}
 	/* check if cards are posted or not */
-	if (!radeon_card_posted(rdev) && rdev->bios) {
-		DRM_INFO("GPU not posted. posting now...\n");
-		radeon_combios_asic_init(rdev->ddev);
-	}
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
 	/* Set asic errata */
 	r100_errata(rdev);
 	/* Initialize clocks */
@@ -3264,7 +3413,7 @@
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 	if (rdev->flags & RADEON_IS_PCI) {
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index 0daf0d7..ca50903 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -10,26 +10,26 @@
  * CS functions
  */
 struct r100_cs_track_cb {
-	struct radeon_object	*robj;
+	struct radeon_bo	*robj;
 	unsigned		pitch;
 	unsigned		cpp;
 	unsigned		offset;
 };
 
 struct r100_cs_track_array {
-	struct radeon_object	*robj;
+	struct radeon_bo	*robj;
 	unsigned		esize;
 };
 
 struct r100_cs_cube_info {
-	struct radeon_object	*robj;
-	unsigned                offset;
+	struct radeon_bo	*robj;
+	unsigned		offset;
 	unsigned		width;
 	unsigned		height;
 };
 
 struct r100_cs_track_texture {
-	struct radeon_object	*robj;
+	struct radeon_bo	*robj;
 	struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */
 	unsigned		pitch;
 	unsigned		width;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 2f43ee8..83378c3 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -137,14 +137,19 @@
 
 void rv370_pcie_gart_disable(struct radeon_device *rdev)
 {
-	uint32_t tmp;
+	u32 tmp;
+	int r;
 
 	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
 	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
 	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
 	if (rdev->gart.table.vram.robj) {
-		radeon_object_kunmap(rdev->gart.table.vram.robj);
-		radeon_object_unpin(rdev->gart.table.vram.robj);
+		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->gart.table.vram.robj);
+			radeon_bo_unpin(rdev->gart.table.vram.robj);
+			radeon_bo_unreserve(rdev->gart.table.vram.robj);
+		}
 	}
 }
 
@@ -1181,6 +1186,9 @@
 {
 	int r;
 
+	/* set common regs */
+	r100_set_common_regs(rdev);
+	/* program mc */
 	r300_mc_program(rdev);
 	/* Resume clock */
 	r300_clock_startup(rdev);
@@ -1193,13 +1201,18 @@
 		if (r)
 			return r;
 	}
+
+	if (rdev->family == CHIP_R300 ||
+	    rdev->family == CHIP_R350 ||
+	    rdev->family == CHIP_RV350)
+		r100_enable_bm(rdev);
+
 	if (rdev->flags & RADEON_IS_PCI) {
 		r = r100_pci_gart_enable(rdev);
 		if (r)
 			return r;
 	}
 	/* Enable IRQ */
-	rdev->irq.sw_int = true;
 	r100_irq_set(rdev);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
@@ -1237,6 +1250,8 @@
 	radeon_combios_asic_init(rdev->ddev);
 	/* Resume clock after posting */
 	r300_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return r300_startup(rdev);
 }
 
@@ -1265,7 +1280,7 @@
 		r100_pci_gart_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
@@ -1303,10 +1318,8 @@
 			RREG32(R_0007C0_CP_STAT));
 	}
 	/* check if cards are posted or not */
-	if (!radeon_card_posted(rdev) && rdev->bios) {
-		DRM_INFO("GPU not posted. posting now...\n");
-		radeon_combios_asic_init(rdev->ddev);
-	}
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
 	/* Set asic errata */
 	r300_errata(rdev);
 	/* Initialize clocks */
@@ -1325,7 +1338,7 @@
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 	if (rdev->flags & RADEON_IS_PCIE) {
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 1cefdbc..c05a727 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -169,6 +169,9 @@
 {
 	int r;
 
+	/* set common regs */
+	r100_set_common_regs(rdev);
+	/* program mc */
 	r300_mc_program(rdev);
 	/* Resume clock */
 	r420_clock_resume(rdev);
@@ -186,7 +189,6 @@
 	}
 	r420_pipes_init(rdev);
 	/* Enable IRQ */
-	rdev->irq.sw_int = true;
 	r100_irq_set(rdev);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
@@ -229,7 +231,8 @@
 	}
 	/* Resume clock after posting */
 	r420_clock_resume(rdev);
-
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return r420_startup(rdev);
 }
 
@@ -258,7 +261,7 @@
 	radeon_agp_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	if (rdev->is_atom_bios) {
 		radeon_atombios_fini(rdev);
 	} else {
@@ -301,14 +304,9 @@
 			RREG32(R_0007C0_CP_STAT));
 	}
 	/* check if cards are posted or not */
-	if (!radeon_card_posted(rdev) && rdev->bios) {
-		DRM_INFO("GPU not posted. posting now...\n");
-		if (rdev->is_atom_bios) {
-			atom_asic_init(rdev->mode_info.atom_context);
-		} else {
-			radeon_combios_asic_init(rdev->ddev);
-		}
-	}
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
 	/* Initialize power management */
@@ -331,10 +329,13 @@
 		return r;
 	}
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r) {
 		return r;
 	}
+	if (rdev->family == CHIP_R420)
+		r100_enable_bm(rdev);
+
 	if (rdev->flags & RADEON_IS_PCIE) {
 		r = rv370_pcie_gart_init(rdev);
 		if (r)
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 7baa739..74ad89b 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -716,6 +716,8 @@
 
 #define AVIVO_DVOA_BIT_DEPTH_CONTROL			0x7988
 
+#define AVIVO_DC_GPIO_HPD_A                 0x7e94
+
 #define AVIVO_GPIO_0                        0x7e30
 #define AVIVO_GPIO_1                        0x7e40
 #define AVIVO_GPIO_2                        0x7e50
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index f743518..0f3843b 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -185,7 +185,6 @@
 			return r;
 	}
 	/* Enable IRQ */
-	rdev->irq.sw_int = true;
 	rs600_irq_set(rdev);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
@@ -221,6 +220,8 @@
 	atom_asic_init(rdev->mode_info.atom_context);
 	/* Resume clock after posting */
 	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return r520_startup(rdev);
 }
 
@@ -254,6 +255,9 @@
 			RREG32(R_0007C0_CP_STAT));
 	}
 	/* check if cards are posted or not */
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+
 	if (!radeon_card_posted(rdev) && rdev->bios) {
 		DRM_INFO("GPU not posted. posting now...\n");
 		atom_asic_init(rdev->mode_info.atom_context);
@@ -277,7 +281,7 @@
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 	r = rv370_pcie_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6740ed2..36656bd 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -38,8 +38,10 @@
 
 #define PFP_UCODE_SIZE 576
 #define PM4_UCODE_SIZE 1792
+#define RLC_UCODE_SIZE 768
 #define R700_PFP_UCODE_SIZE 848
 #define R700_PM4_UCODE_SIZE 1360
+#define R700_RLC_UCODE_SIZE 1024
 
 /* Firmware Names */
 MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -62,6 +64,8 @@
 MODULE_FIRMWARE("radeon/RV730_me.bin");
 MODULE_FIRMWARE("radeon/RV710_pfp.bin");
 MODULE_FIRMWARE("radeon/RV710_me.bin");
+MODULE_FIRMWARE("radeon/R600_rlc.bin");
+MODULE_FIRMWARE("radeon/R700_rlc.bin");
 
 int r600_debugfs_mc_info_init(struct radeon_device *rdev);
 
@@ -70,6 +74,281 @@
 void r600_gpu_init(struct radeon_device *rdev);
 void r600_fini(struct radeon_device *rdev);
 
+/* hpd for digital panel detect/disconnect */
+bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+	bool connected = false;
+
+	if (ASIC_IS_DCE3(rdev)) {
+		switch (hpd) {
+		case RADEON_HPD_1:
+			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
+				connected = true;
+			break;
+		case RADEON_HPD_2:
+			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
+				connected = true;
+			break;
+		case RADEON_HPD_3:
+			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
+				connected = true;
+			break;
+		case RADEON_HPD_4:
+			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
+				connected = true;
+			break;
+			/* DCE 3.2 */
+		case RADEON_HPD_5:
+			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
+				connected = true;
+			break;
+		case RADEON_HPD_6:
+			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
+				connected = true;
+			break;
+		default:
+			break;
+		}
+	} else {
+		switch (hpd) {
+		case RADEON_HPD_1:
+			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+				connected = true;
+			break;
+		case RADEON_HPD_2:
+			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+				connected = true;
+			break;
+		case RADEON_HPD_3:
+			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+				connected = true;
+			break;
+		default:
+			break;
+		}
+	}
+	return connected;
+}
+
+void r600_hpd_set_polarity(struct radeon_device *rdev,
+			   enum radeon_hpd_id hpd)
+{
+	u32 tmp;
+	bool connected = r600_hpd_sense(rdev, hpd);
+
+	if (ASIC_IS_DCE3(rdev)) {
+		switch (hpd) {
+		case RADEON_HPD_1:
+			tmp = RREG32(DC_HPD1_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HPDx_INT_POLARITY;
+			else
+				tmp |= DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD1_INT_CONTROL, tmp);
+			break;
+		case RADEON_HPD_2:
+			tmp = RREG32(DC_HPD2_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HPDx_INT_POLARITY;
+			else
+				tmp |= DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD2_INT_CONTROL, tmp);
+			break;
+		case RADEON_HPD_3:
+			tmp = RREG32(DC_HPD3_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HPDx_INT_POLARITY;
+			else
+				tmp |= DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD3_INT_CONTROL, tmp);
+			break;
+		case RADEON_HPD_4:
+			tmp = RREG32(DC_HPD4_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HPDx_INT_POLARITY;
+			else
+				tmp |= DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD4_INT_CONTROL, tmp);
+			break;
+		case RADEON_HPD_5:
+			tmp = RREG32(DC_HPD5_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HPDx_INT_POLARITY;
+			else
+				tmp |= DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD5_INT_CONTROL, tmp);
+			break;
+			/* DCE 3.2 */
+		case RADEON_HPD_6:
+			tmp = RREG32(DC_HPD6_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HPDx_INT_POLARITY;
+			else
+				tmp |= DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD6_INT_CONTROL, tmp);
+			break;
+		default:
+			break;
+		}
+	} else {
+		switch (hpd) {
+		case RADEON_HPD_1:
+			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+			else
+				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+			break;
+		case RADEON_HPD_2:
+			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+			else
+				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+			break;
+		case RADEON_HPD_3:
+			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+			else
+				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+void r600_hpd_init(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+
+	if (ASIC_IS_DCE3(rdev)) {
+		u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
+		if (ASIC_IS_DCE32(rdev))
+			tmp |= DC_HPDx_EN;
+
+		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+			switch (radeon_connector->hpd.hpd) {
+			case RADEON_HPD_1:
+				WREG32(DC_HPD1_CONTROL, tmp);
+				rdev->irq.hpd[0] = true;
+				break;
+			case RADEON_HPD_2:
+				WREG32(DC_HPD2_CONTROL, tmp);
+				rdev->irq.hpd[1] = true;
+				break;
+			case RADEON_HPD_3:
+				WREG32(DC_HPD3_CONTROL, tmp);
+				rdev->irq.hpd[2] = true;
+				break;
+			case RADEON_HPD_4:
+				WREG32(DC_HPD4_CONTROL, tmp);
+				rdev->irq.hpd[3] = true;
+				break;
+				/* DCE 3.2 */
+			case RADEON_HPD_5:
+				WREG32(DC_HPD5_CONTROL, tmp);
+				rdev->irq.hpd[4] = true;
+				break;
+			case RADEON_HPD_6:
+				WREG32(DC_HPD6_CONTROL, tmp);
+				rdev->irq.hpd[5] = true;
+				break;
+			default:
+				break;
+			}
+		}
+	} else {
+		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+			switch (radeon_connector->hpd.hpd) {
+			case RADEON_HPD_1:
+				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+				rdev->irq.hpd[0] = true;
+				break;
+			case RADEON_HPD_2:
+				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+				rdev->irq.hpd[1] = true;
+				break;
+			case RADEON_HPD_3:
+				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+				rdev->irq.hpd[2] = true;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+	r600_irq_set(rdev);
+}
+
+void r600_hpd_fini(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+
+	if (ASIC_IS_DCE3(rdev)) {
+		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+			switch (radeon_connector->hpd.hpd) {
+			case RADEON_HPD_1:
+				WREG32(DC_HPD1_CONTROL, 0);
+				rdev->irq.hpd[0] = false;
+				break;
+			case RADEON_HPD_2:
+				WREG32(DC_HPD2_CONTROL, 0);
+				rdev->irq.hpd[1] = false;
+				break;
+			case RADEON_HPD_3:
+				WREG32(DC_HPD3_CONTROL, 0);
+				rdev->irq.hpd[2] = false;
+				break;
+			case RADEON_HPD_4:
+				WREG32(DC_HPD4_CONTROL, 0);
+				rdev->irq.hpd[3] = false;
+				break;
+				/* DCE 3.2 */
+			case RADEON_HPD_5:
+				WREG32(DC_HPD5_CONTROL, 0);
+				rdev->irq.hpd[4] = false;
+				break;
+			case RADEON_HPD_6:
+				WREG32(DC_HPD6_CONTROL, 0);
+				rdev->irq.hpd[5] = false;
+				break;
+			default:
+				break;
+			}
+		}
+	} else {
+		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+			switch (radeon_connector->hpd.hpd) {
+			case RADEON_HPD_1:
+				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
+				rdev->irq.hpd[0] = false;
+				break;
+			case RADEON_HPD_2:
+				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
+				rdev->irq.hpd[1] = false;
+				break;
+			case RADEON_HPD_3:
+				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
+				rdev->irq.hpd[2] = false;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+}
+
 /*
  * R600 PCIE GART
  */
@@ -180,7 +459,7 @@
 void r600_pcie_gart_disable(struct radeon_device *rdev)
 {
 	u32 tmp;
-	int i;
+	int i, r;
 
 	/* Disable all tables */
 	for (i = 0; i < 7; i++)
@@ -208,8 +487,12 @@
 	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
 	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
 	if (rdev->gart.table.vram.robj) {
-		radeon_object_kunmap(rdev->gart.table.vram.robj);
-		radeon_object_unpin(rdev->gart.table.vram.robj);
+		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->gart.table.vram.robj);
+			radeon_bo_unpin(rdev->gart.table.vram.robj);
+			radeon_bo_unreserve(rdev->gart.table.vram.robj);
+		}
 	}
 }
 
@@ -1101,6 +1384,10 @@
 	(void)RREG32(PCIE_PORT_DATA);
 }
 
+void r600_hdp_flush(struct radeon_device *rdev)
+{
+	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+}
 
 /*
  * CP & Ring
@@ -1110,11 +1397,12 @@
 	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
 }
 
-int r600_cp_init_microcode(struct radeon_device *rdev)
+int r600_init_microcode(struct radeon_device *rdev)
 {
 	struct platform_device *pdev;
 	const char *chip_name;
-	size_t pfp_req_size, me_req_size;
+	const char *rlc_chip_name;
+	size_t pfp_req_size, me_req_size, rlc_req_size;
 	char fw_name[30];
 	int err;
 
@@ -1128,30 +1416,62 @@
 	}
 
 	switch (rdev->family) {
-	case CHIP_R600: chip_name = "R600"; break;
-	case CHIP_RV610: chip_name = "RV610"; break;
-	case CHIP_RV630: chip_name = "RV630"; break;
-	case CHIP_RV620: chip_name = "RV620"; break;
-	case CHIP_RV635: chip_name = "RV635"; break;
-	case CHIP_RV670: chip_name = "RV670"; break;
+	case CHIP_R600:
+		chip_name = "R600";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RV610:
+		chip_name = "RV610";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RV630:
+		chip_name = "RV630";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RV620:
+		chip_name = "RV620";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RV635:
+		chip_name = "RV635";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RV670:
+		chip_name = "RV670";
+		rlc_chip_name = "R600";
+		break;
 	case CHIP_RS780:
-	case CHIP_RS880: chip_name = "RS780"; break;
-	case CHIP_RV770: chip_name = "RV770"; break;
+	case CHIP_RS880:
+		chip_name = "RS780";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RV770:
+		chip_name = "RV770";
+		rlc_chip_name = "R700";
+		break;
 	case CHIP_RV730:
-	case CHIP_RV740: chip_name = "RV730"; break;
-	case CHIP_RV710: chip_name = "RV710"; break;
+	case CHIP_RV740:
+		chip_name = "RV730";
+		rlc_chip_name = "R700";
+		break;
+	case CHIP_RV710:
+		chip_name = "RV710";
+		rlc_chip_name = "R700";
+		break;
 	default: BUG();
 	}
 
 	if (rdev->family >= CHIP_RV770) {
 		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
 		me_req_size = R700_PM4_UCODE_SIZE * 4;
+		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
 	} else {
 		pfp_req_size = PFP_UCODE_SIZE * 4;
 		me_req_size = PM4_UCODE_SIZE * 12;
+		rlc_req_size = RLC_UCODE_SIZE * 4;
 	}
 
-	DRM_INFO("Loading %s CP Microcode\n", chip_name);
+	DRM_INFO("Loading %s Microcode\n", chip_name);
 
 	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
 	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
@@ -1175,6 +1495,18 @@
 		       rdev->me_fw->size, fw_name);
 		err = -EINVAL;
 	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
+	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
+	if (err)
+		goto out;
+	if (rdev->rlc_fw->size != rlc_req_size) {
+		printk(KERN_ERR
+		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->rlc_fw->size, fw_name);
+		err = -EINVAL;
+	}
+
 out:
 	platform_device_unregister(pdev);
 
@@ -1187,6 +1519,8 @@
 		rdev->pfp_fw = NULL;
 		release_firmware(rdev->me_fw);
 		rdev->me_fw = NULL;
+		release_firmware(rdev->rlc_fw);
+		rdev->rlc_fw = NULL;
 	}
 	return err;
 }
@@ -1381,10 +1715,16 @@
 
 void r600_wb_disable(struct radeon_device *rdev)
 {
+	int r;
+
 	WREG32(SCRATCH_UMSK, 0);
 	if (rdev->wb.wb_obj) {
-		radeon_object_kunmap(rdev->wb.wb_obj);
-		radeon_object_unpin(rdev->wb.wb_obj);
+		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+		if (unlikely(r != 0))
+			return;
+		radeon_bo_kunmap(rdev->wb.wb_obj);
+		radeon_bo_unpin(rdev->wb.wb_obj);
+		radeon_bo_unreserve(rdev->wb.wb_obj);
 	}
 }
 
@@ -1392,7 +1732,7 @@
 {
 	r600_wb_disable(rdev);
 	if (rdev->wb.wb_obj) {
-		radeon_object_unref(&rdev->wb.wb_obj);
+		radeon_bo_unref(&rdev->wb.wb_obj);
 		rdev->wb.wb = NULL;
 		rdev->wb.wb_obj = NULL;
 	}
@@ -1403,22 +1743,29 @@
 	int r;
 
 	if (rdev->wb.wb_obj == NULL) {
-		r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
-				RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj);
+		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
+				RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
 		if (r) {
-			dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r);
+			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
 			return r;
 		}
-		r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
-				&rdev->wb.gpu_addr);
-		if (r) {
-			dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r);
+		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+		if (unlikely(r != 0)) {
 			r600_wb_fini(rdev);
 			return r;
 		}
-		r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+				&rdev->wb.gpu_addr);
 		if (r) {
-			dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r);
+			radeon_bo_unreserve(rdev->wb.wb_obj);
+			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
+			r600_wb_fini(rdev);
+			return r;
+		}
+		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+		radeon_bo_unreserve(rdev->wb.wb_obj);
+		if (r) {
+			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
 			r600_wb_fini(rdev);
 			return r;
 		}
@@ -1433,10 +1780,14 @@
 void r600_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence)
 {
+	/* Also consider EVENT_WRITE_EOP.  It handles the interrupts + timestamps + events */
 	/* Emit fence sequence & fire IRQ */
 	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
 	radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
 	radeon_ring_write(rdev, fence->seq);
+	/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
+	radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
+	radeon_ring_write(rdev, RB_INT_STAT);
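+	/* a reading of the surrounding code: the PACKET0 write above sets
+	 * RB_INT_STAT in CP_INT_STATUS; with RB_INT_ENABLE set by
+	 * r600_irq_set() this raises the CP interrupt that drives
+	 * radeon_fence_process() in r600_irq_process()
+	 */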
 }
 
 int r600_copy_dma(struct radeon_device *rdev,
@@ -1459,18 +1810,6 @@
 	return 0;
 }
 
-int r600_irq_process(struct radeon_device *rdev)
-{
-	/* FIXME: implement */
-	return 0;
-}
-
-int r600_irq_set(struct radeon_device *rdev)
-{
-	/* FIXME: implement */
-	return 0;
-}
-
 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
 			 uint32_t tiling_flags, uint32_t pitch,
 			 uint32_t offset, uint32_t obj_size)
@@ -1506,6 +1845,14 @@
 {
 	int r;
 
+	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+		r = r600_init_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load firmware!\n");
+			return r;
+		}
+	}
+
 	r600_mc_program(rdev);
 	if (rdev->flags & RADEON_IS_AGP) {
 		r600_agp_enable(rdev);
@@ -1516,13 +1863,26 @@
 	}
 	r600_gpu_init(rdev);
 
-	r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
-			      &rdev->r600_blit.shader_gpu_addr);
+	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+			&rdev->r600_blit.shader_gpu_addr);
+	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 	if (r) {
-		DRM_ERROR("failed to pin blit object %d\n", r);
+		dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
 		return r;
 	}
 
+	/* Enable IRQ */
+	r = r600_irq_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: IH init failed (%d).\n", r);
+		radeon_irq_kms_fini(rdev);
+		return r;
+	}
+	r600_irq_set(rdev);
+
 	r = radeon_ring_init(rdev, rdev->cp.ring_size);
 	if (r)
 		return r;
@@ -1583,13 +1943,19 @@
 
 int r600_suspend(struct radeon_device *rdev)
 {
+	int r;
+
 	/* FIXME: we should wait for ring to be empty */
 	r600_cp_stop(rdev);
 	rdev->cp.ready = false;
 	r600_wb_disable(rdev);
 	r600_pcie_gart_disable(rdev);
 	/* unpin shaders bo */
-	radeon_object_unpin(rdev->r600_blit.shader_obj);
+	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+	if (unlikely(r != 0))
+		return r;
+	radeon_bo_unpin(rdev->r600_blit.shader_obj);
+	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 	return 0;
 }
 
@@ -1627,7 +1993,11 @@
 	if (r)
 		return r;
 	/* Post card if necessary */
-	if (!r600_card_posted(rdev) && rdev->bios) {
+	if (!r600_card_posted(rdev)) {
+		if (!rdev->bios) {
+			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+			return -EINVAL;
+		}
 		DRM_INFO("GPU not posted. posting now...\n");
 		atom_asic_init(rdev->mode_info.atom_context);
 	}
@@ -1650,31 +2020,31 @@
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
+
+	r = radeon_irq_kms_init(rdev);
+	if (r)
+		return r;
+
 	rdev->cp.ring_obj = NULL;
 	r600_ring_init(rdev, 1024 * 1024);
 
-	if (!rdev->me_fw || !rdev->pfp_fw) {
-		r = r600_cp_init_microcode(rdev);
-		if (r) {
-			DRM_ERROR("Failed to load firmware!\n");
-			return r;
-		}
-	}
+	rdev->ih.ring_obj = NULL;
+	r600_ih_ring_init(rdev, 64 * 1024);
 
 	r = r600_pcie_gart_init(rdev);
 	if (r)
 		return r;
 
-	rdev->accel_working = true;
 	r = r600_blit_init(rdev);
 	if (r) {
-		DRM_ERROR("radeon: failled blitter (%d).\n", r);
+		DRM_ERROR("radeon: failed blitter (%d).\n", r);
 		return r;
 	}
 
+	rdev->accel_working = true;
 	r = r600_startup(rdev);
 	if (r) {
 		r600_suspend(rdev);
@@ -1686,12 +2056,12 @@
 	if (rdev->accel_working) {
 		r = radeon_ib_pool_init(rdev);
 		if (r) {
-			DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
+			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
 			rdev->accel_working = false;
 		}
 		r = r600_ib_test(rdev);
 		if (r) {
-			DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
 			rdev->accel_working = false;
 		}
 	}
@@ -1704,6 +2074,8 @@
 	r600_suspend(rdev);
 
 	r600_blit_fini(rdev);
+	r600_irq_fini(rdev);
+	radeon_irq_kms_fini(rdev);
 	radeon_ring_fini(rdev);
 	r600_wb_fini(rdev);
 	r600_pcie_gart_fini(rdev);
@@ -1712,7 +2084,7 @@
 	radeon_clocks_fini(rdev);
 	if (rdev->flags & RADEON_IS_AGP)
 		radeon_agp_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
@@ -1798,8 +2170,657 @@
 	return r;
 }
 
+/*
+ * Interrupts
+ *
+ * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works much
+ * the same as the CP ring buffer, but in reverse: rather than the CPU
+ * writing to the ring and the GPU consuming, the GPU writes to the ring
+ * and the host consumes.  As the host irq handler processes interrupts, it
+ * increments the rptr.  When the rptr catches up with the wptr, all the
+ * current interrupts have been processed.
+ */
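+/*
+ * A minimal consumer-loop sketch of the scheme described above
+ * (byte offsets, 16-byte entries; see r600_irq_process() below for
+ * the real thing):
+ *
+ *	while (rptr != wptr) {
+ *		handle_entry(&ih.ring[rptr / 4]);
+ *		rptr = (rptr + 16) % ih.ring_size;
+ *	}
+ *	WREG32(IH_RB_RPTR, rptr);
+ */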
 
+void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
+{
+	u32 rb_bufsz;
 
+	/* Align ring size */
+	rb_bufsz = drm_order(ring_size / 4);
+	ring_size = (1 << rb_bufsz) * 4;
+	rdev->ih.ring_size = ring_size;
+	rdev->ih.align_mask = 4 - 1;
+}
+
+static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
+{
+	int r;
+
+	rdev->ih.ring_size = ring_size;
+	/* Allocate ring buffer */
+	if (rdev->ih.ring_obj == NULL) {
+		r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
+				     true,
+				     RADEON_GEM_DOMAIN_GTT,
+				     &rdev->ih.ring_obj);
+		if (r) {
+			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
+			return r;
+		}
+		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
+		if (unlikely(r != 0))
+			return r;
+		r = radeon_bo_pin(rdev->ih.ring_obj,
+				  RADEON_GEM_DOMAIN_GTT,
+				  &rdev->ih.gpu_addr);
+		if (r) {
+			radeon_bo_unreserve(rdev->ih.ring_obj);
+			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
+			return r;
+		}
+		r = radeon_bo_kmap(rdev->ih.ring_obj,
+				   (void **)&rdev->ih.ring);
+		radeon_bo_unreserve(rdev->ih.ring_obj);
+		if (r) {
+			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
+			return r;
+		}
+	}
+	rdev->ih.ptr_mask = (rdev->ih.ring_size / 4) - 1;
+	rdev->ih.rptr = 0;
+
+	return 0;
+}
+
+static void r600_ih_ring_fini(struct radeon_device *rdev)
+{
+	int r;
+	if (rdev->ih.ring_obj) {
+		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->ih.ring_obj);
+			radeon_bo_unpin(rdev->ih.ring_obj);
+			radeon_bo_unreserve(rdev->ih.ring_obj);
+		}
+		radeon_bo_unref(&rdev->ih.ring_obj);
+		rdev->ih.ring = NULL;
+		rdev->ih.ring_obj = NULL;
+	}
+}
+
+static void r600_rlc_stop(struct radeon_device *rdev)
+{
+	if (rdev->family >= CHIP_RV770) {
+		/* r7xx asics need to soft reset RLC before halting */
+		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
+		RREG32(SRBM_SOFT_RESET);
+		udelay(15000);
+		WREG32(SRBM_SOFT_RESET, 0);
+		RREG32(SRBM_SOFT_RESET);
+	}
+
+	WREG32(RLC_CNTL, 0);
+}
+
+static void r600_rlc_start(struct radeon_device *rdev)
+{
+	WREG32(RLC_CNTL, RLC_ENABLE);
+}
+
+static int r600_rlc_init(struct radeon_device *rdev)
+{
+	u32 i;
+	const __be32 *fw_data;
+
+	if (!rdev->rlc_fw)
+		return -EINVAL;
+
+	r600_rlc_stop(rdev);
+
+	WREG32(RLC_HB_BASE, 0);
+	WREG32(RLC_HB_CNTL, 0);
+	WREG32(RLC_HB_RPTR, 0);
+	WREG32(RLC_HB_WPTR, 0);
+	WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
+	WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
+	WREG32(RLC_MC_CNTL, 0);
+	WREG32(RLC_UCODE_CNTL, 0);
+
+	fw_data = (const __be32 *)rdev->rlc_fw->data;
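+	/* the firmware image is an array of big-endian 32-bit words;
+	 * be32_to_cpup() converts each to CPU byte order as it is
+	 * written to RLC_UCODE_DATA below
+	 */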
+	if (rdev->family >= CHIP_RV770) {
+		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
+			WREG32(RLC_UCODE_ADDR, i);
+			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+		}
+	} else {
+		for (i = 0; i < RLC_UCODE_SIZE; i++) {
+			WREG32(RLC_UCODE_ADDR, i);
+			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+		}
+	}
+	WREG32(RLC_UCODE_ADDR, 0);
+
+	r600_rlc_start(rdev);
+
+	return 0;
+}
+
+static void r600_enable_interrupts(struct radeon_device *rdev)
+{
+	u32 ih_cntl = RREG32(IH_CNTL);
+	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+
+	ih_cntl |= ENABLE_INTR;
+	ih_rb_cntl |= IH_RB_ENABLE;
+	WREG32(IH_CNTL, ih_cntl);
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+	rdev->ih.enabled = true;
+}
+
+static void r600_disable_interrupts(struct radeon_device *rdev)
+{
+	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+	u32 ih_cntl = RREG32(IH_CNTL);
+
+	ih_rb_cntl &= ~IH_RB_ENABLE;
+	ih_cntl &= ~ENABLE_INTR;
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+	WREG32(IH_CNTL, ih_cntl);
+	/* set rptr, wptr to 0 */
+	WREG32(IH_RB_RPTR, 0);
+	WREG32(IH_RB_WPTR, 0);
+	rdev->ih.enabled = false;
+	rdev->ih.wptr = 0;
+	rdev->ih.rptr = 0;
+}
+
+static void r600_disable_interrupt_state(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	WREG32(CP_INT_CNTL, 0);
+	WREG32(GRBM_INT_CNTL, 0);
+	WREG32(DxMODE_INT_MASK, 0);
+	if (ASIC_IS_DCE3(rdev)) {
+		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
+		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
+		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD1_INT_CONTROL, tmp);
+		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD2_INT_CONTROL, tmp);
+		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD3_INT_CONTROL, tmp);
+		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD4_INT_CONTROL, tmp);
+		if (ASIC_IS_DCE32(rdev)) {
+			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD5_INT_CONTROL, 0);
+			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD6_INT_CONTROL, 0);
+		}
+	} else {
+		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
+		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+	}
+}
+
+int r600_irq_init(struct radeon_device *rdev)
+{
+	int ret = 0;
+	int rb_bufsz;
+	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+
+	/* allocate ring */
+	ret = r600_ih_ring_alloc(rdev, rdev->ih.ring_size);
+	if (ret)
+		return ret;
+
+	/* disable irqs */
+	r600_disable_interrupts(rdev);
+
+	/* init rlc */
+	ret = r600_rlc_init(rdev);
+	if (ret) {
+		r600_ih_ring_fini(rdev);
+		return ret;
+	}
+
+	/* setup interrupt control */
+	/* set dummy read address to ring address */
+	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+	interrupt_cntl = RREG32(INTERRUPT_CNTL);
+	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
+	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
+	 */
+	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
+	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
+	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
+	WREG32(INTERRUPT_CNTL, interrupt_cntl);
+
+	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
+	rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+
+	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
+		      IH_WPTR_OVERFLOW_CLEAR |
+		      (rb_bufsz << 1));
+	/* WPTR writeback, not yet */
+	/*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
+	WREG32(IH_RB_WPTR_ADDR_LO, 0);
+	WREG32(IH_RB_WPTR_ADDR_HI, 0);
+
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+
+	/* set rptr, wptr to 0 */
+	WREG32(IH_RB_RPTR, 0);
+	WREG32(IH_RB_WPTR, 0);
+
+	/* Default settings for IH_CNTL (disabled at first) */
+	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
+	/* RPTR_REARM only works if msi's are enabled */
+	if (rdev->msi_enabled)
+		ih_cntl |= RPTR_REARM;
+
+#ifdef __BIG_ENDIAN
+	ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
+#endif
+	WREG32(IH_CNTL, ih_cntl);
+
+	/* force the active interrupt state to all disabled */
+	r600_disable_interrupt_state(rdev);
+
+	/* enable irqs */
+	r600_enable_interrupts(rdev);
+
+	return ret;
+}
+
+void r600_irq_fini(struct radeon_device *rdev)
+{
+	r600_disable_interrupts(rdev);
+	r600_rlc_stop(rdev);
+	r600_ih_ring_fini(rdev);
+}
+
+int r600_irq_set(struct radeon_device *rdev)
+{
+	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+	u32 mode_int = 0;
+	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
+
+	/* don't enable anything if the ih is disabled */
+	if (!rdev->ih.enabled)
+		return 0;
+
+	if (ASIC_IS_DCE3(rdev)) {
+		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		if (ASIC_IS_DCE32(rdev)) {
+			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		}
+	} else {
+		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	}
+
+	if (rdev->irq.sw_int) {
+		DRM_DEBUG("r600_irq_set: sw int\n");
+		cp_int_cntl |= RB_INT_ENABLE;
+	}
+	if (rdev->irq.crtc_vblank_int[0]) {
+		DRM_DEBUG("r600_irq_set: vblank 0\n");
+		mode_int |= D1MODE_VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[1]) {
+		DRM_DEBUG("r600_irq_set: vblank 1\n");
+		mode_int |= D2MODE_VBLANK_INT_MASK;
+	}
+	if (rdev->irq.hpd[0]) {
+		DRM_DEBUG("r600_irq_set: hpd 1\n");
+		hpd1 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[1]) {
+		DRM_DEBUG("r600_irq_set: hpd 2\n");
+		hpd2 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[2]) {
+		DRM_DEBUG("r600_irq_set: hpd 3\n");
+		hpd3 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[3]) {
+		DRM_DEBUG("r600_irq_set: hpd 4\n");
+		hpd4 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[4]) {
+		DRM_DEBUG("r600_irq_set: hpd 5\n");
+		hpd5 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[5]) {
+		DRM_DEBUG("r600_irq_set: hpd 6\n");
+		hpd6 |= DC_HPDx_INT_EN;
+	}
+
+	WREG32(CP_INT_CNTL, cp_int_cntl);
+	WREG32(DxMODE_INT_MASK, mode_int);
+	if (ASIC_IS_DCE3(rdev)) {
+		WREG32(DC_HPD1_INT_CONTROL, hpd1);
+		WREG32(DC_HPD2_INT_CONTROL, hpd2);
+		WREG32(DC_HPD3_INT_CONTROL, hpd3);
+		WREG32(DC_HPD4_INT_CONTROL, hpd4);
+		if (ASIC_IS_DCE32(rdev)) {
+			WREG32(DC_HPD5_INT_CONTROL, hpd5);
+			WREG32(DC_HPD6_INT_CONTROL, hpd6);
+		}
+	} else {
+		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
+		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
+		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
+	}
+
+	return 0;
+}
+
+static inline void r600_irq_ack(struct radeon_device *rdev,
+				u32 *disp_int,
+				u32 *disp_int_cont,
+				u32 *disp_int_cont2)
+{
+	u32 tmp;
+
+	if (ASIC_IS_DCE3(rdev)) {
+		*disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
+		*disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
+		*disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
+	} else {
+		*disp_int = RREG32(DISP_INTERRUPT_STATUS);
+		*disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+		*disp_int_cont2 = 0;
+	}
+
+	if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
+	if (*disp_int & LB_D1_VLINE_INTERRUPT)
+		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
+	if (*disp_int & LB_D2_VBLANK_INTERRUPT)
+		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
+	if (*disp_int & LB_D2_VLINE_INTERRUPT)
+		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
+	if (*disp_int & DC_HPD1_INTERRUPT) {
+		if (ASIC_IS_DCE3(rdev)) {
+			tmp = RREG32(DC_HPD1_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HPD1_INT_CONTROL, tmp);
+		} else {
+			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+		}
+	}
+	if (*disp_int & DC_HPD2_INTERRUPT) {
+		if (ASIC_IS_DCE3(rdev)) {
+			tmp = RREG32(DC_HPD2_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HPD2_INT_CONTROL, tmp);
+		} else {
+			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+		}
+	}
+	if (*disp_int_cont & DC_HPD3_INTERRUPT) {
+		if (ASIC_IS_DCE3(rdev)) {
+			tmp = RREG32(DC_HPD3_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HPD3_INT_CONTROL, tmp);
+		} else {
+			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+		}
+	}
+	if (*disp_int_cont & DC_HPD4_INTERRUPT) {
+		tmp = RREG32(DC_HPD4_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD4_INT_CONTROL, tmp);
+	}
+	if (ASIC_IS_DCE32(rdev)) {
+		if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
+			tmp = RREG32(DC_HPD5_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HPD5_INT_CONTROL, tmp);
+		}
+		if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
+			tmp = RREG32(DC_HPD6_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HPD6_INT_CONTROL, tmp);
+		}
+	}
+}
+
+void r600_irq_disable(struct radeon_device *rdev)
+{
+	u32 disp_int, disp_int_cont, disp_int_cont2;
+
+	r600_disable_interrupts(rdev);
+	/* Wait and acknowledge irq */
+	mdelay(1);
+	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
+	r600_disable_interrupt_state(rdev);
+}
+
+static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
+{
+	u32 wptr, tmp;
+
+	/* XXX use writeback */
+	wptr = RREG32(IH_RB_WPTR);
+
+	if (wptr & RB_OVERFLOW) {
+		WARN_ON(1);
+		/* XXX deal with overflow */
+		DRM_ERROR("IH RB overflow\n");
+		tmp = RREG32(IH_RB_CNTL);
+		tmp |= IH_WPTR_OVERFLOW_CLEAR;
+		WREG32(IH_RB_CNTL, tmp);
+	}
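+	/* mask off the overflow/status bits so what remains is a byte
+	 * offset into the ring
+	 */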
+	wptr = wptr & WPTR_OFFSET_MASK;
+
+	return wptr;
+}
+
+/* r600 IV Ring
+ * Each IV ring entry is 128 bits:
+ * [7:0]    - interrupt source id
+ * [31:8]   - reserved
+ * [59:32]  - interrupt source data
+ * [127:60]  - reserved
+ *
+ * The basic interrupt vector entries
+ * are decoded as follows:
+ * src_id  src_data  description
+ *      1         0  D1 Vblank
+ *      1         1  D1 Vline
+ *      5         0  D2 Vblank
+ *      5         1  D2 Vline
+ *     19         0  FP Hot plug detection A
+ *     19         1  FP Hot plug detection B
+ *     19         2  DAC A auto-detection
+ *     19         3  DAC B auto-detection
+ *    176         -  CP_INT RB
+ *    177         -  CP_INT IB1
+ *    178         -  CP_INT IB2
+ *    181         -  EOP Interrupt
+ *    233         -  GUI Idle
+ *
+ * Note: these are based on r600 and may need to be
+ * adjusted or extended on newer asics.
+ */
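+/*
+ * A decode sketch for one 16-byte entry at byte offset rptr (this is
+ * exactly what the loop in r600_irq_process() below does):
+ *
+ *	ring_index = rptr / 4;
+ *	src_id   = ih.ring[ring_index] & 0xff;
+ *	src_data = ih.ring[ring_index + 1] & 0xfffffff;
+ */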
+
+int r600_irq_process(struct radeon_device *rdev)
+{
+	u32 wptr = r600_get_ih_wptr(rdev);
+	u32 rptr = rdev->ih.rptr;
+	u32 src_id, src_data;
+	u32 last_entry = rdev->ih.ring_size - 16;
+	u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
+	unsigned long flags;
+	bool queue_hotplug = false;
+
+	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+
+	spin_lock_irqsave(&rdev->ih.lock, flags);
+
+	if (rptr == wptr) {
+		spin_unlock_irqrestore(&rdev->ih.lock, flags);
+		return IRQ_NONE;
+	}
+	if (rdev->shutdown) {
+		spin_unlock_irqrestore(&rdev->ih.lock, flags);
+		return IRQ_NONE;
+	}
+
+restart_ih:
+	/* display interrupts */
+	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
+
+	rdev->ih.wptr = wptr;
+	while (rptr != wptr) {
+		/* wptr/rptr are in bytes! */
+		ring_index = rptr / 4;
+		src_id =  rdev->ih.ring[ring_index] & 0xff;
+		src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
+
+		switch (src_id) {
+		case 1: /* D1 vblank/vline */
+			switch (src_data) {
+			case 0: /* D1 vblank */
+				if (disp_int & LB_D1_VBLANK_INTERRUPT) {
+					drm_handle_vblank(rdev->ddev, 0);
+					disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D1 vblank\n");
+				}
+				break;
+			case 1: /* D1 vline */
+				if (disp_int & LB_D1_VLINE_INTERRUPT) {
+					disp_int &= ~LB_D1_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D1 vline\n");
+				}
+				break;
+			default:
+				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 5: /* D2 vblank/vline */
+			switch (src_data) {
+			case 0: /* D2 vblank */
+				if (disp_int & LB_D2_VBLANK_INTERRUPT) {
+					drm_handle_vblank(rdev->ddev, 1);
+					disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D2 vblank\n");
+				}
+				break;
+			case 1: /* D2 vline */
+				if (disp_int & LB_D2_VLINE_INTERRUPT) {
+					disp_int &= ~LB_D2_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D2 vline\n");
+				}
+				break;
+			default:
+				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 19: /* HPD/DAC hotplug */
+			switch (src_data) {
+			case 0:
+				if (disp_int & DC_HPD1_INTERRUPT) {
+					disp_int &= ~DC_HPD1_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD1\n");
+				}
+				break;
+			case 1:
+				if (disp_int & DC_HPD2_INTERRUPT) {
+					disp_int &= ~DC_HPD2_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD2\n");
+				}
+				break;
+			case 4:
+				if (disp_int_cont & DC_HPD3_INTERRUPT) {
+					disp_int_cont &= ~DC_HPD3_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD3\n");
+				}
+				break;
+			case 5:
+				if (disp_int_cont & DC_HPD4_INTERRUPT) {
+					disp_int_cont &= ~DC_HPD4_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD4\n");
+				}
+				break;
+			case 10:
+				if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
+					disp_int_cont &= ~DC_HPD5_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD5\n");
+				}
+				break;
+			case 12:
+				if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
+					disp_int_cont &= ~DC_HPD6_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD6\n");
+				}
+				break;
+			default:
+				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 176: /* CP_INT in ring buffer */
+		case 177: /* CP_INT in IB1 */
+		case 178: /* CP_INT in IB2 */
+			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
+			radeon_fence_process(rdev);
+			break;
+		case 181: /* CP EOP event */
+			DRM_DEBUG("IH: CP EOP\n");
+			break;
+		default:
+			DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+			break;
+		}
+
+		/* wptr/rptr are in bytes! */
+		if (rptr == last_entry)
+			rptr = 0;
+		else
+			rptr += 16;
+	}
+	/* make sure wptr hasn't changed while processing */
+	wptr = r600_get_ih_wptr(rdev);
+	if (wptr != rdev->ih.wptr)
+		goto restart_ih;
+	if (queue_hotplug)
+		queue_work(rdev->wq, &rdev->hotplug_work);
+	rdev->ih.rptr = rptr;
+	WREG32(IH_RB_RPTR, rdev->ih.rptr);
+	spin_unlock_irqrestore(&rdev->ih.lock, flags);
+	return IRQ_HANDLED;
+}
 
 /*
  * Debugfs info
@@ -1811,21 +2832,21 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct radeon_device *rdev = dev->dev_private;
-	uint32_t rdp, wdp;
 	unsigned count, i, j;
 
 	radeon_ring_free_size(rdev);
-	rdp = RREG32(CP_RB_RPTR);
-	wdp = RREG32(CP_RB_WPTR);
-	count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
+	count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
 	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
-	seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
-	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
+	seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
+	seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
+	seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
+	seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
 	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
 	seq_printf(m, "%u dwords in ring\n", count);
+	i = rdev->cp.rptr;
 	for (j = 0; j <= count; j++) {
-		i = (rdp + j) & rdev->cp.ptr_mask;
 		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
+		i = (i + 1) & rdev->cp.ptr_mask;
 	}
 	return 0;
 }
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index dbf716e..9aecafb 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -473,9 +473,8 @@
 	obj_size += r6xx_ps_size * 4;
 	obj_size = ALIGN(obj_size, 256);
 
-	r = radeon_object_create(rdev, NULL, obj_size,
-				 true, RADEON_GEM_DOMAIN_VRAM,
-				 false, &rdev->r600_blit.shader_obj);
+	r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM,
+				&rdev->r600_blit.shader_obj);
 	if (r) {
 		DRM_ERROR("r600 failed to allocate shader\n");
 		return r;
@@ -485,12 +484,14 @@
 		  obj_size,
 		  rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
 
-	r = radeon_object_kmap(rdev->r600_blit.shader_obj, &ptr);
+	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
 	if (r) {
 		DRM_ERROR("failed to map blit object %d\n", r);
 		return r;
 	}
-
 	if (rdev->family >= CHIP_RV770)
 		memcpy_toio(ptr + rdev->r600_blit.state_offset,
 			    r7xx_default_state, rdev->r600_blit.state_len * 4);
@@ -500,19 +501,26 @@
 	if (num_packet2s)
 		memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
 			    packet2s, num_packet2s * 4);
-
-
 	memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4);
 	memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
-
-	radeon_object_kunmap(rdev->r600_blit.shader_obj);
+	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
+	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 	return 0;
 }
 
 void r600_blit_fini(struct radeon_device *rdev)
 {
-	radeon_object_unpin(rdev->r600_blit.shader_obj);
-	radeon_object_unref(&rdev->r600_blit.shader_obj);
+	int r;
+
+	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+	if (unlikely(r != 0)) {
+		dev_err(rdev->dev, "(%d) can't finish r600 blit\n", r);
+		goto out_unref;
+	}
+	radeon_bo_unpin(rdev->r600_blit.shader_obj);
+	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+out_unref:
+	radeon_bo_unref(&rdev->r600_blit.shader_obj);
 }
 
 int r600_vb_ib_get(struct radeon_device *rdev)
@@ -569,9 +577,9 @@
 	ring_size = num_loops * dwords_per_loop;
 	/* set default  + shaders */
 	ring_size += 40; /* shaders + def state */
-	ring_size += 3; /* fence emit for VB IB */
+	ring_size += 5; /* fence emit for VB IB */
 	ring_size += 5; /* done copy */
-	ring_size += 3; /* fence emit for done copy */
+	ring_size += 5; /* fence emit for done copy */
 	r = radeon_ring_lock(rdev, ring_size);
 	WARN_ON(r);
 
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 27ab428..05894ed 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -456,7 +456,215 @@
 #define         WAIT_2D_IDLECLEAN_bit                           (1 << 16)
 #define         WAIT_3D_IDLECLEAN_bit                           (1 << 17)
 
+#define IH_RB_CNTL                                        0x3e00
+#       define IH_RB_ENABLE                               (1 << 0)
+#       define IH_IB_SIZE(x)                              ((x) << 1) /* log2 */
+#       define IH_RB_FULL_DRAIN_ENABLE                    (1 << 6)
+#       define IH_WPTR_WRITEBACK_ENABLE                   (1 << 8)
+#       define IH_WPTR_WRITEBACK_TIMER(x)                 ((x) << 9) /* log2 */
+#       define IH_WPTR_OVERFLOW_ENABLE                    (1 << 16)
+#       define IH_WPTR_OVERFLOW_CLEAR                     (1 << 31)
+#define IH_RB_BASE                                        0x3e04
+#define IH_RB_RPTR                                        0x3e08
+#define IH_RB_WPTR                                        0x3e0c
+#       define RB_OVERFLOW                                (1 << 0)
+#       define WPTR_OFFSET_MASK                           0x3fffc
+#define IH_RB_WPTR_ADDR_HI                                0x3e10
+#define IH_RB_WPTR_ADDR_LO                                0x3e14
+#define IH_CNTL                                           0x3e18
+#       define ENABLE_INTR                                (1 << 0)
+#       define IH_MC_SWAP(x)                              ((x) << 2)
+#       define IH_MC_SWAP_NONE                            0
+#       define IH_MC_SWAP_16BIT                           1
+#       define IH_MC_SWAP_32BIT                           2
+#       define IH_MC_SWAP_64BIT                           3
+#       define RPTR_REARM                                 (1 << 4)
+#       define MC_WRREQ_CREDIT(x)                         ((x) << 15)
+#       define MC_WR_CLEAN_CNT(x)                         ((x) << 20)
 
+#define RLC_CNTL                                          0x3f00
+#       define RLC_ENABLE                                 (1 << 0)
+#define RLC_HB_BASE                                       0x3f10
+#define RLC_HB_CNTL                                       0x3f0c
+#define RLC_HB_RPTR                                       0x3f20
+#define RLC_HB_WPTR                                       0x3f1c
+#define RLC_HB_WPTR_LSB_ADDR                              0x3f14
+#define RLC_HB_WPTR_MSB_ADDR                              0x3f18
+#define RLC_MC_CNTL                                       0x3f44
+#define RLC_UCODE_CNTL                                    0x3f48
+#define RLC_UCODE_ADDR                                    0x3f2c
+#define RLC_UCODE_DATA                                    0x3f30
+
+#define SRBM_SOFT_RESET                                   0xe60
+#       define SOFT_RESET_RLC                             (1 << 13)
+
+#define CP_INT_CNTL                                       0xc124
+#       define CNTX_BUSY_INT_ENABLE                       (1 << 19)
+#       define CNTX_EMPTY_INT_ENABLE                      (1 << 20)
+#       define SCRATCH_INT_ENABLE                         (1 << 25)
+#       define TIME_STAMP_INT_ENABLE                      (1 << 26)
+#       define IB2_INT_ENABLE                             (1 << 29)
+#       define IB1_INT_ENABLE                             (1 << 30)
+#       define RB_INT_ENABLE                              (1 << 31)
+#define CP_INT_STATUS                                     0xc128
+#       define SCRATCH_INT_STAT                           (1 << 25)
+#       define TIME_STAMP_INT_STAT                        (1 << 26)
+#       define IB2_INT_STAT                               (1 << 29)
+#       define IB1_INT_STAT                               (1 << 30)
+#       define RB_INT_STAT                                (1 << 31)
+
+#define GRBM_INT_CNTL                                     0x8060
+#       define RDERR_INT_ENABLE                           (1 << 0)
+#       define WAIT_COUNT_TIMEOUT_INT_ENABLE              (1 << 1)
+#       define GUI_IDLE_INT_ENABLE                        (1 << 19)
+
+#define INTERRUPT_CNTL                                    0x5468
+#       define IH_DUMMY_RD_OVERRIDE                       (1 << 0)
+#       define IH_DUMMY_RD_EN                             (1 << 1)
+#       define IH_REQ_NONSNOOP_EN                         (1 << 3)
+#       define GEN_IH_INT_EN                              (1 << 8)
+#define INTERRUPT_CNTL2                                   0x546c
+
+#define D1MODE_VBLANK_STATUS                              0x6534
+#define D2MODE_VBLANK_STATUS                              0x6d34
+#       define DxMODE_VBLANK_OCCURRED                     (1 << 0)
+#       define DxMODE_VBLANK_ACK                          (1 << 4)
+#       define DxMODE_VBLANK_STAT                         (1 << 12)
+#       define DxMODE_VBLANK_INTERRUPT                    (1 << 16)
+#       define DxMODE_VBLANK_INTERRUPT_TYPE               (1 << 17)
+#define D1MODE_VLINE_STATUS                               0x653c
+#define D2MODE_VLINE_STATUS                               0x6d3c
+#       define DxMODE_VLINE_OCCURRED                      (1 << 0)
+#       define DxMODE_VLINE_ACK                           (1 << 4)
+#       define DxMODE_VLINE_STAT                          (1 << 12)
+#       define DxMODE_VLINE_INTERRUPT                     (1 << 16)
+#       define DxMODE_VLINE_INTERRUPT_TYPE                (1 << 17)
+#define DxMODE_INT_MASK                                   0x6540
+#       define D1MODE_VBLANK_INT_MASK                     (1 << 0)
+#       define D1MODE_VLINE_INT_MASK                      (1 << 4)
+#       define D2MODE_VBLANK_INT_MASK                     (1 << 8)
+#       define D2MODE_VLINE_INT_MASK                      (1 << 12)
+#define DCE3_DISP_INTERRUPT_STATUS                        0x7ddc
+#       define DC_HPD1_INTERRUPT                          (1 << 18)
+#       define DC_HPD2_INTERRUPT                          (1 << 19)
+#define DISP_INTERRUPT_STATUS                             0x7edc
+#       define LB_D1_VLINE_INTERRUPT                      (1 << 2)
+#       define LB_D2_VLINE_INTERRUPT                      (1 << 3)
+#       define LB_D1_VBLANK_INTERRUPT                     (1 << 4)
+#       define LB_D2_VBLANK_INTERRUPT                     (1 << 5)
+#       define DACA_AUTODETECT_INTERRUPT                  (1 << 16)
+#       define DACB_AUTODETECT_INTERRUPT                  (1 << 17)
+#       define DC_HOT_PLUG_DETECT1_INTERRUPT              (1 << 18)
+#       define DC_HOT_PLUG_DETECT2_INTERRUPT              (1 << 19)
+#       define DC_I2C_SW_DONE_INTERRUPT                   (1 << 20)
+#       define DC_I2C_HW_DONE_INTERRUPT                   (1 << 21)
+#define DISP_INTERRUPT_STATUS_CONTINUE                    0x7ee8
+#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE               0x7de8
+#       define DC_HPD4_INTERRUPT                          (1 << 14)
+#       define DC_HPD4_RX_INTERRUPT                       (1 << 15)
+#       define DC_HPD3_INTERRUPT                          (1 << 28)
+#       define DC_HPD1_RX_INTERRUPT                       (1 << 29)
+#       define DC_HPD2_RX_INTERRUPT                       (1 << 30)
+#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE2              0x7dec
+#       define DC_HPD3_RX_INTERRUPT                       (1 << 0)
+#       define DIGA_DP_VID_STREAM_DISABLE_INTERRUPT       (1 << 1)
+#       define DIGA_DP_STEER_FIFO_OVERFLOW_INTERRUPT      (1 << 2)
+#       define DIGB_DP_VID_STREAM_DISABLE_INTERRUPT       (1 << 3)
+#       define DIGB_DP_STEER_FIFO_OVERFLOW_INTERRUPT      (1 << 4)
+#       define AUX1_SW_DONE_INTERRUPT                     (1 << 5)
+#       define AUX1_LS_DONE_INTERRUPT                     (1 << 6)
+#       define AUX2_SW_DONE_INTERRUPT                     (1 << 7)
+#       define AUX2_LS_DONE_INTERRUPT                     (1 << 8)
+#       define AUX3_SW_DONE_INTERRUPT                     (1 << 9)
+#       define AUX3_LS_DONE_INTERRUPT                     (1 << 10)
+#       define AUX4_SW_DONE_INTERRUPT                     (1 << 11)
+#       define AUX4_LS_DONE_INTERRUPT                     (1 << 12)
+#       define DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT   (1 << 13)
+#       define DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT   (1 << 14)
+/* DCE 3.2 */
+#       define AUX5_SW_DONE_INTERRUPT                     (1 << 15)
+#       define AUX5_LS_DONE_INTERRUPT                     (1 << 16)
+#       define AUX6_SW_DONE_INTERRUPT                     (1 << 17)
+#       define AUX6_LS_DONE_INTERRUPT                     (1 << 18)
+#       define DC_HPD5_INTERRUPT                          (1 << 19)
+#       define DC_HPD5_RX_INTERRUPT                       (1 << 20)
+#       define DC_HPD6_INTERRUPT                          (1 << 21)
+#       define DC_HPD6_RX_INTERRUPT                       (1 << 22)
+
+#define DACA_AUTO_DETECT_CONTROL                          0x7828
+#define DACB_AUTO_DETECT_CONTROL                          0x7a28
+#define DCE3_DACA_AUTO_DETECT_CONTROL                     0x7028
+#define DCE3_DACB_AUTO_DETECT_CONTROL                     0x7128
+#       define DACx_AUTODETECT_MODE(x)                    ((x) << 0)
+#       define DACx_AUTODETECT_MODE_NONE                  0
+#       define DACx_AUTODETECT_MODE_CONNECT               1
+#       define DACx_AUTODETECT_MODE_DISCONNECT            2
+#       define DACx_AUTODETECT_FRAME_TIME_COUNTER(x)      ((x) << 8)
+/* bit 18 = R/C, 17 = G/Y, 16 = B/Comp */
+#       define DACx_AUTODETECT_CHECK_MASK(x)              ((x) << 16)
+
+#define DCE3_DACA_AUTODETECT_INT_CONTROL                  0x7038
+#define DCE3_DACB_AUTODETECT_INT_CONTROL                  0x7138
+#define DACA_AUTODETECT_INT_CONTROL                       0x7838
+#define DACB_AUTODETECT_INT_CONTROL                       0x7a38
+#       define DACx_AUTODETECT_ACK                        (1 << 0)
+#       define DACx_AUTODETECT_INT_ENABLE                 (1 << 16)
+
+#define DC_HOT_PLUG_DETECT1_CONTROL                       0x7d00
+#define DC_HOT_PLUG_DETECT2_CONTROL                       0x7d10
+#define DC_HOT_PLUG_DETECT3_CONTROL                       0x7d24
+#       define DC_HOT_PLUG_DETECTx_EN                     (1 << 0)
+
+#define DC_HOT_PLUG_DETECT1_INT_STATUS                    0x7d04
+#define DC_HOT_PLUG_DETECT2_INT_STATUS                    0x7d14
+#define DC_HOT_PLUG_DETECT3_INT_STATUS                    0x7d28
+#       define DC_HOT_PLUG_DETECTx_INT_STATUS             (1 << 0)
+#       define DC_HOT_PLUG_DETECTx_SENSE                  (1 << 1)
+
+/* DCE 3.0 */
+#define DC_HPD1_INT_STATUS                                0x7d00
+#define DC_HPD2_INT_STATUS                                0x7d0c
+#define DC_HPD3_INT_STATUS                                0x7d18
+#define DC_HPD4_INT_STATUS                                0x7d24
+/* DCE 3.2 */
+#define DC_HPD5_INT_STATUS                                0x7dc0
+#define DC_HPD6_INT_STATUS                                0x7df4
+#       define DC_HPDx_INT_STATUS                         (1 << 0)
+#       define DC_HPDx_SENSE                              (1 << 1)
+#       define DC_HPDx_RX_INT_STATUS                      (1 << 8)
+
+#define DC_HOT_PLUG_DETECT1_INT_CONTROL                   0x7d08
+#define DC_HOT_PLUG_DETECT2_INT_CONTROL                   0x7d18
+#define DC_HOT_PLUG_DETECT3_INT_CONTROL                   0x7d2c
+#       define DC_HOT_PLUG_DETECTx_INT_ACK                (1 << 0)
+#       define DC_HOT_PLUG_DETECTx_INT_POLARITY           (1 << 8)
+#       define DC_HOT_PLUG_DETECTx_INT_EN                 (1 << 16)
+/* DCE 3.0 */
+#define DC_HPD1_INT_CONTROL                               0x7d04
+#define DC_HPD2_INT_CONTROL                               0x7d10
+#define DC_HPD3_INT_CONTROL                               0x7d1c
+#define DC_HPD4_INT_CONTROL                               0x7d28
+/* DCE 3.2 */
+#define DC_HPD5_INT_CONTROL                               0x7dc4
+#define DC_HPD6_INT_CONTROL                               0x7df8
+#       define DC_HPDx_INT_ACK                            (1 << 0)
+#       define DC_HPDx_INT_POLARITY                       (1 << 8)
+#       define DC_HPDx_INT_EN                             (1 << 16)
+#       define DC_HPDx_RX_INT_ACK                         (1 << 20)
+#       define DC_HPDx_RX_INT_EN                          (1 << 24)
+
+/* DCE 3.0 */
+#define DC_HPD1_CONTROL                                   0x7d08
+#define DC_HPD2_CONTROL                                   0x7d14
+#define DC_HPD3_CONTROL                                   0x7d20
+#define DC_HPD4_CONTROL                                   0x7d2c
+/* DCE 3.2 */
+#define DC_HPD5_CONTROL                                   0x7dc8
+#define DC_HPD6_CONTROL                                   0x7dfc
+#       define DC_HPDx_CONNECTION_TIMER(x)                ((x) << 0)
+#       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
+/* DCE 3.2 */
+#       define DC_HPDx_EN                                 (1 << 28)
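+/* e.g. r600_hpd_init() above programs DC_HPDx_CONNECTION_TIMER(0x9c4) |
+ * DC_HPDx_RX_INT_TIMER(0xfa) into the DC_HPDx_CONTROL registers
+ */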
 
 /*
  * PM4
@@ -500,7 +708,6 @@
 #define	PACKET3_WAIT_REG_MEM				0x3C
 #define	PACKET3_MEM_WRITE				0x3D
 #define	PACKET3_INDIRECT_BUFFER				0x32
-#define	PACKET3_CP_INTERRUPT				0x40
 #define	PACKET3_SURFACE_SYNC				0x43
 #              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
 #              define PACKET3_TC_ACTION_ENA        (1 << 23)
@@ -674,4 +881,5 @@
 #define		S_000E60_SOFT_RESET_TSC(x)		(((x) & 1) << 16)
 #define		S_000E60_SOFT_RESET_VMC(x)		(((x) & 1) << 17)
 
+#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL		0x5480
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 224506a..c938bb5 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -28,8 +28,6 @@
 #ifndef __RADEON_H__
 #define __RADEON_H__
 
-#include "radeon_object.h"
-
 /* TODO: Here are things that needs to be done :
  *	- surface allocator & initializer : (bit like scratch reg) should
  *	  initialize HDP_ stuff on RS600, R600, R700 hw, well anythings
@@ -67,6 +65,11 @@
 #include <linux/list.h>
 #include <linux/kref.h>
 
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_module.h>
+
 #include "radeon_family.h"
 #include "radeon_mode.h"
 #include "radeon_reg.h"
@@ -85,6 +88,7 @@
 extern int radeon_testing;
 extern int radeon_connector_table;
 extern int radeon_tv;
+extern int radeon_new_pll;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -186,76 +190,62 @@
  * Tiling registers
  */
 struct radeon_surface_reg {
-	struct radeon_object *robj;
+	struct radeon_bo *bo;
 };
 
 #define RADEON_GEM_MAX_SURFACES 8
 
 /*
- * Radeon buffer.
+ * TTM.
  */
-struct radeon_object;
+struct radeon_mman {
+	struct ttm_bo_global_ref        bo_global_ref;
+	struct ttm_global_reference	mem_global_ref;
+	bool				mem_global_referenced;
+	struct ttm_bo_device		bdev;
+};
 
-struct radeon_object_list {
+struct radeon_bo {
+	/* Protected by gem.mutex */
+	struct list_head		list;
+	/* Protected by tbo.reserved */
+	u32				placements[3];
+	struct ttm_placement		placement;
+	struct ttm_buffer_object	tbo;
+	struct ttm_bo_kmap_obj		kmap;
+	unsigned			pin_count;
+	void				*kptr;
+	u32				tiling_flags;
+	u32				pitch;
+	int				surface_reg;
+	/* Constant after initialization */
+	struct radeon_device		*rdev;
+	struct drm_gem_object		*gobj;
+};
+
+struct radeon_bo_list {
 	struct list_head	list;
-	struct radeon_object	*robj;
+	struct radeon_bo	*bo;
 	uint64_t		gpu_offset;
 	unsigned		rdomain;
 	unsigned		wdomain;
-	uint32_t                tiling_flags;
+	u32			tiling_flags;
 };
 
-int radeon_object_init(struct radeon_device *rdev);
-void radeon_object_fini(struct radeon_device *rdev);
-int radeon_object_create(struct radeon_device *rdev,
-			 struct drm_gem_object *gobj,
-			 unsigned long size,
-			 bool kernel,
-			 uint32_t domain,
-			 bool interruptible,
-			 struct radeon_object **robj_ptr);
-int radeon_object_kmap(struct radeon_object *robj, void **ptr);
-void radeon_object_kunmap(struct radeon_object *robj);
-void radeon_object_unref(struct radeon_object **robj);
-int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
-		      uint64_t *gpu_addr);
-void radeon_object_unpin(struct radeon_object *robj);
-int radeon_object_wait(struct radeon_object *robj);
-int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement);
-int radeon_object_evict_vram(struct radeon_device *rdev);
-int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset);
-void radeon_object_force_delete(struct radeon_device *rdev);
-void radeon_object_list_add_object(struct radeon_object_list *lobj,
-				   struct list_head *head);
-int radeon_object_list_validate(struct list_head *head, void *fence);
-void radeon_object_list_unvalidate(struct list_head *head);
-void radeon_object_list_clean(struct list_head *head);
-int radeon_object_fbdev_mmap(struct radeon_object *robj,
-			     struct vm_area_struct *vma);
-unsigned long radeon_object_size(struct radeon_object *robj);
-void radeon_object_clear_surface_reg(struct radeon_object *robj);
-int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
-			       bool force_drop);
-void radeon_object_set_tiling_flags(struct radeon_object *robj,
-				    uint32_t tiling_flags, uint32_t pitch);
-void radeon_object_get_tiling_flags(struct radeon_object *robj, uint32_t *tiling_flags, uint32_t *pitch);
-void radeon_bo_move_notify(struct ttm_buffer_object *bo,
-			   struct ttm_mem_reg *mem);
-void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 /*
  * GEM objects.
  */
 struct radeon_gem {
+	struct mutex		mutex;
 	struct list_head	objects;
 };
 
 int radeon_gem_init(struct radeon_device *rdev);
 void radeon_gem_fini(struct radeon_device *rdev);
 int radeon_gem_object_create(struct radeon_device *rdev, int size,
-			     int alignment, int initial_domain,
-			     bool discardable, bool kernel,
-			     bool interruptible,
-			     struct drm_gem_object **obj);
+				int alignment, int initial_domain,
+				bool discardable, bool kernel,
+				struct drm_gem_object **obj);
 int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
 			  uint64_t *gpu_addr);
 void radeon_gem_object_unpin(struct drm_gem_object *obj);
@@ -271,7 +261,7 @@
 };
 
 struct radeon_gart_table_vram {
-	struct radeon_object		*robj;
+	struct radeon_bo		*robj;
 	volatile uint32_t		*ptr;
 };
 
@@ -352,11 +342,16 @@
 	bool		sw_int;
 	/* FIXME: use a define max crtc rather than hardcode it */
 	bool		crtc_vblank_int[2];
+	/* FIXME: use defines for max hpd/dacs */
+	bool            hpd[6];
+	spinlock_t sw_lock;
+	int sw_refcount;
 };
 
 int radeon_irq_kms_init(struct radeon_device *rdev);
 void radeon_irq_kms_fini(struct radeon_device *rdev);
-
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
 
 /*
  * CP & ring.
@@ -376,7 +371,7 @@
  */
 struct radeon_ib_pool {
 	struct mutex		mutex;
-	struct radeon_object	*robj;
+	struct radeon_bo	*robj;
 	struct list_head	scheduled_ibs;
 	struct radeon_ib	ibs[RADEON_IB_POOL_SIZE];
 	bool			ready;
@@ -384,7 +379,7 @@
 };
 
 struct radeon_cp {
-	struct radeon_object	*ring_obj;
+	struct radeon_bo	*ring_obj;
 	volatile uint32_t	*ring;
 	unsigned		rptr;
 	unsigned		wptr;
@@ -399,8 +394,25 @@
 	bool			ready;
 };
 
+/*
+ * R6xx+ IH ring
+ */
+struct r600_ih {
+	struct radeon_bo	*ring_obj;
+	volatile uint32_t	*ring;
+	unsigned		rptr;
+	unsigned		wptr;
+	unsigned		wptr_old;
+	unsigned		ring_size;
+	uint64_t		gpu_addr;
+	uint32_t		align_mask;
+	uint32_t		ptr_mask;
+	spinlock_t              lock;
+	bool                    enabled;
+};
+
 struct r600_blit {
-	struct radeon_object	*shader_obj;
+	struct radeon_bo	*shader_obj;
 	u64 shader_gpu_addr;
 	u32 vs_offset, ps_offset;
 	u32 state_offset;
@@ -430,8 +442,8 @@
  */
 struct radeon_cs_reloc {
 	struct drm_gem_object		*gobj;
-	struct radeon_object		*robj;
-	struct radeon_object_list	lobj;
+	struct radeon_bo		*robj;
+	struct radeon_bo_list		lobj;
 	uint32_t			handle;
 	uint32_t			flags;
 };
@@ -527,7 +539,7 @@
  * Writeback
  */
 struct radeon_wb {
-	struct radeon_object	*wb_obj;
+	struct radeon_bo	*wb_obj;
 	volatile uint32_t	*wb;
 	uint64_t		gpu_addr;
 };
@@ -639,6 +651,11 @@
 			       uint32_t offset, uint32_t obj_size);
 	int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
 	void (*bandwidth_update)(struct radeon_device *rdev);
+	void (*hdp_flush)(struct radeon_device *rdev);
+	void (*hpd_init)(struct radeon_device *rdev);
+	void (*hpd_fini)(struct radeon_device *rdev);
+	bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+	void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 };
 
 /*
@@ -751,9 +768,9 @@
 	uint8_t				*bios;
 	bool				is_atom_bios;
 	uint16_t			bios_header_start;
-	struct radeon_object		*stollen_vga_memory;
+	struct radeon_bo		*stollen_vga_memory;
 	struct fb_info			*fbdev_info;
-	struct radeon_object		*fbdev_robj;
+	struct radeon_bo		*fbdev_rbo;
 	struct radeon_framebuffer	*fbdev_rfb;
 	/* Register mmio */
 	resource_size_t			rmmio_base;
@@ -791,8 +808,12 @@
 	struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
 	const struct firmware *me_fw;	/* all family ME firmware */
 	const struct firmware *pfp_fw;	/* r6/700 PFP firmware */
+	const struct firmware *rlc_fw;	/* r6/700 RLC firmware */
 	struct r600_blit r600_blit;
 	int msi_enabled; /* msi enabled */
+	struct r600_ih ih; /* r6/700 interrupt ring */
+	struct workqueue_struct *wq;
+	struct work_struct hotplug_work;
 };
 
 int radeon_device_init(struct radeon_device *rdev,
@@ -829,6 +850,10 @@
 	}
 }
 
+/*
+ * Cast helper
+ */
+#define to_radeon_fence(p) ((struct radeon_fence *)(p))
 
 /*
  * Registers read & write functions.
@@ -965,18 +990,24 @@
 #define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev))
 #define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
 #define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
-#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
+#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
 #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
 #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
 #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
 #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
 #define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
+#define radeon_hdp_flush(rdev) (rdev)->asic->hdp_flush((rdev))
+#define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev))
+#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
+#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
+#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
 
 /* Common functions */
 extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
 extern int radeon_modeset_init(struct radeon_device *rdev);
 extern void radeon_modeset_fini(struct radeon_device *rdev);
 extern bool radeon_card_posted(struct radeon_device *rdev);
+extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
 extern int radeon_clocks_init(struct radeon_device *rdev);
 extern void radeon_clocks_fini(struct radeon_device *rdev);
 extern void radeon_scratch_init(struct radeon_device *rdev);
@@ -984,6 +1015,7 @@
 extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
 extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
 extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
+extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
 
 /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
 struct r100_mc_save {
@@ -1021,7 +1053,7 @@
 extern void r100_vga_render_disable(struct radeon_device *rdev);
 extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
 						struct radeon_cs_packet *pkt,
-						struct radeon_object *robj);
+						struct radeon_bo *robj);
 extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
 				struct radeon_cs_packet *pkt,
 				const unsigned *auth, unsigned n,
@@ -1029,6 +1061,8 @@
 extern int r100_cs_packet_parse(struct radeon_cs_parser *p,
 				struct radeon_cs_packet *pkt,
 				unsigned idx);
+extern void r100_enable_bm(struct radeon_device *rdev);
+extern void r100_set_common_regs(struct radeon_device *rdev);
 
 /* rv200,rv250,rv280 */
 extern void r200_set_safe_registers(struct radeon_device *rdev);
@@ -1104,7 +1138,14 @@
 extern void r600_scratch_init(struct radeon_device *rdev);
 extern int r600_blit_init(struct radeon_device *rdev);
 extern void r600_blit_fini(struct radeon_device *rdev);
-extern int r600_cp_init_microcode(struct radeon_device *rdev);
+extern int r600_init_microcode(struct radeon_device *rdev);
 extern int r600_gpu_reset(struct radeon_device *rdev);
+/* r600 irq */
+extern int r600_irq_init(struct radeon_device *rdev);
+extern void r600_irq_fini(struct radeon_device *rdev);
+extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
+extern int r600_irq_set(struct radeon_device *rdev);
+
+#include "radeon_object.h"
 
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index c18fbee..636116b 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -76,6 +76,12 @@
 void r100_bandwidth_update(struct radeon_device *rdev);
 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int r100_ring_test(struct radeon_device *rdev);
+void r100_hdp_flush(struct radeon_device *rdev);
+void r100_hpd_init(struct radeon_device *rdev);
+void r100_hpd_fini(struct radeon_device *rdev);
+bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void r100_hpd_set_polarity(struct radeon_device *rdev,
+			   enum radeon_hpd_id hpd);
 
 static struct radeon_asic r100_asic = {
 	.init = &r100_init,
@@ -107,6 +113,11 @@
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &r100_bandwidth_update,
+	.hdp_flush = &r100_hdp_flush,
+	.hpd_init = &r100_hpd_init,
+	.hpd_fini = &r100_hpd_fini,
+	.hpd_sense = &r100_hpd_sense,
+	.hpd_set_polarity = &r100_hpd_set_polarity,
 };
 
 
@@ -162,6 +173,11 @@
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &r100_bandwidth_update,
+	.hdp_flush = &r100_hdp_flush,
+	.hpd_init = &r100_hpd_init,
+	.hpd_fini = &r100_hpd_fini,
+	.hpd_sense = &r100_hpd_sense,
+	.hpd_set_polarity = &r100_hpd_set_polarity,
 };
 
 /*
@@ -201,6 +217,11 @@
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &r100_bandwidth_update,
+	.hdp_flush = &r100_hdp_flush,
+	.hpd_init = &r100_hpd_init,
+	.hpd_fini = &r100_hpd_fini,
+	.hpd_sense = &r100_hpd_sense,
+	.hpd_set_polarity = &r100_hpd_set_polarity,
 };
 
 
@@ -245,6 +266,11 @@
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &r100_bandwidth_update,
+	.hdp_flush = &r100_hdp_flush,
+	.hpd_init = &r100_hpd_init,
+	.hpd_fini = &r100_hpd_fini,
+	.hpd_sense = &r100_hpd_sense,
+	.hpd_set_polarity = &r100_hpd_set_polarity,
 };
 
 
@@ -263,6 +289,12 @@
 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rs600_bandwidth_update(struct radeon_device *rdev);
+void rs600_hpd_init(struct radeon_device *rdev);
+void rs600_hpd_fini(struct radeon_device *rdev);
+bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void rs600_hpd_set_polarity(struct radeon_device *rdev,
+			    enum radeon_hpd_id hpd);
+
 static struct radeon_asic rs600_asic = {
 	.init = &rs600_init,
 	.fini = &rs600_fini,
@@ -291,6 +323,11 @@
 	.set_pcie_lanes = NULL,
 	.set_clock_gating = &radeon_atom_set_clock_gating,
 	.bandwidth_update = &rs600_bandwidth_update,
+	.hdp_flush = &r100_hdp_flush,
+	.hpd_init = &rs600_hpd_init,
+	.hpd_fini = &rs600_hpd_fini,
+	.hpd_sense = &rs600_hpd_sense,
+	.hpd_set_polarity = &rs600_hpd_set_polarity,
 };
 
 
@@ -334,6 +371,11 @@
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &rs690_bandwidth_update,
+	.hdp_flush = &r100_hdp_flush,
+	.hpd_init = &rs600_hpd_init,
+	.hpd_fini = &rs600_hpd_fini,
+	.hpd_sense = &rs600_hpd_sense,
+	.hpd_set_polarity = &rs600_hpd_set_polarity,
 };
 
 
@@ -381,6 +423,11 @@
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &rv515_bandwidth_update,
+	.hdp_flush = &r100_hdp_flush,
+	.hpd_init = &rs600_hpd_init,
+	.hpd_fini = &rs600_hpd_fini,
+	.hpd_sense = &rs600_hpd_sense,
+	.hpd_set_polarity = &rs600_hpd_set_polarity,
 };
 
 
@@ -419,6 +466,11 @@
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &rv515_bandwidth_update,
+	.hdp_flush = &r100_hdp_flush,
+	.hpd_init = &rs600_hpd_init,
+	.hpd_fini = &rs600_hpd_fini,
+	.hpd_sense = &rs600_hpd_sense,
+	.hpd_set_polarity = &rs600_hpd_set_polarity,
 };
 
 /*
@@ -455,6 +507,12 @@
 int r600_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset, uint64_t dst_offset,
 		   unsigned num_pages, struct radeon_fence *fence);
+void r600_hdp_flush(struct radeon_device *rdev);
+void r600_hpd_init(struct radeon_device *rdev);
+void r600_hpd_fini(struct radeon_device *rdev);
+bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void r600_hpd_set_polarity(struct radeon_device *rdev,
+			   enum radeon_hpd_id hpd);
 
 static struct radeon_asic r600_asic = {
 	.init = &r600_init,
@@ -470,6 +528,7 @@
 	.ring_ib_execute = &r600_ring_ib_execute,
 	.irq_set = &r600_irq_set,
 	.irq_process = &r600_irq_process,
+	.get_vblank_counter = &rs600_get_vblank_counter,
 	.fence_ring_emit = &r600_fence_ring_emit,
 	.cs_parse = &r600_cs_parse,
 	.copy_blit = &r600_copy_blit,
@@ -484,6 +543,11 @@
 	.set_surface_reg = r600_set_surface_reg,
 	.clear_surface_reg = r600_clear_surface_reg,
 	.bandwidth_update = &rv515_bandwidth_update,
+	.hdp_flush = &r600_hdp_flush,
+	.hpd_init = &r600_hpd_init,
+	.hpd_fini = &r600_hpd_fini,
+	.hpd_sense = &r600_hpd_sense,
+	.hpd_set_polarity = &r600_hpd_set_polarity,
 };
 
 /*
@@ -509,6 +573,7 @@
 	.ring_ib_execute = &r600_ring_ib_execute,
 	.irq_set = &r600_irq_set,
 	.irq_process = &r600_irq_process,
+	.get_vblank_counter = &rs600_get_vblank_counter,
 	.fence_ring_emit = &r600_fence_ring_emit,
 	.cs_parse = &r600_cs_parse,
 	.copy_blit = &r600_copy_blit,
@@ -523,6 +588,11 @@
 	.set_surface_reg = r600_set_surface_reg,
 	.clear_surface_reg = r600_clear_surface_reg,
 	.bandwidth_update = &rv515_bandwidth_update,
+	.hdp_flush = &r600_hdp_flush,
+	.hpd_init = &r600_hpd_init,
+	.hpd_fini = &r600_hpd_fini,
+	.hpd_sense = &r600_hpd_sense,
+	.hpd_set_polarity = &r600_hpd_set_polarity,
 };
 
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 2ed88a8..12a0c76 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -47,7 +47,8 @@
 			  int connector_type,
 			  struct radeon_i2c_bus_rec *i2c_bus,
 			  bool linkb, uint32_t igp_lane_info,
-			  uint16_t connector_object_id);
+			  uint16_t connector_object_id,
+			  struct radeon_hpd *hpd);
 
 /* from radeon_legacy_encoder.c */
 extern void
@@ -60,16 +61,16 @@
 	struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
 };
 
-static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device
-							   *dev, uint8_t id)
+static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
+							       uint8_t id)
 {
-	struct radeon_device *rdev = dev->dev_private;
 	struct atom_context *ctx = rdev->mode_info.atom_context;
-	ATOM_GPIO_I2C_ASSIGMENT gpio;
+	ATOM_GPIO_I2C_ASSIGMENT *gpio;
 	struct radeon_i2c_bus_rec i2c;
 	int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
 	struct _ATOM_GPIO_I2C_INFO *i2c_info;
 	uint16_t data_offset;
+	int i;
 
 	memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
 	i2c.valid = false;
@@ -78,34 +79,121 @@
 
 	i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
 
-	gpio = i2c_info->asGPIO_Info[id];
 
-	i2c.mask_clk_reg = le16_to_cpu(gpio.usClkMaskRegisterIndex) * 4;
-	i2c.mask_data_reg = le16_to_cpu(gpio.usDataMaskRegisterIndex) * 4;
-	i2c.put_clk_reg = le16_to_cpu(gpio.usClkEnRegisterIndex) * 4;
-	i2c.put_data_reg = le16_to_cpu(gpio.usDataEnRegisterIndex) * 4;
-	i2c.get_clk_reg = le16_to_cpu(gpio.usClkY_RegisterIndex) * 4;
-	i2c.get_data_reg = le16_to_cpu(gpio.usDataY_RegisterIndex) * 4;
-	i2c.a_clk_reg = le16_to_cpu(gpio.usClkA_RegisterIndex) * 4;
-	i2c.a_data_reg = le16_to_cpu(gpio.usDataA_RegisterIndex) * 4;
-	i2c.mask_clk_mask = (1 << gpio.ucClkMaskShift);
-	i2c.mask_data_mask = (1 << gpio.ucDataMaskShift);
-	i2c.put_clk_mask = (1 << gpio.ucClkEnShift);
-	i2c.put_data_mask = (1 << gpio.ucDataEnShift);
-	i2c.get_clk_mask = (1 << gpio.ucClkY_Shift);
-	i2c.get_data_mask = (1 << gpio.ucDataY_Shift);
-	i2c.a_clk_mask = (1 << gpio.ucClkA_Shift);
-	i2c.a_data_mask = (1 << gpio.ucDataA_Shift);
-	i2c.valid = true;
+	for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+		gpio = &i2c_info->asGPIO_Info[i];
+
+		if (gpio->sucI2cId.ucAccess == id) {
+			i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
+			i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
+			i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
+			i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
+			i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
+			i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
+			i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
+			i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
+			i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+			i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+			i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+			i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+			i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+			i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+			i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+			i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+
+			if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+				i2c.hw_capable = true;
+			else
+				i2c.hw_capable = false;
+
+			if (gpio->sucI2cId.ucAccess == 0xa0)
+				i2c.mm_i2c = true;
+			else
+				i2c.mm_i2c = false;
+
+			i2c.i2c_id = gpio->sucI2cId.ucAccess;
+
+			i2c.valid = true;
+		}
+	}
 
 	return i2c;
 }
 
+static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
+							u8 id)
+{
+	struct atom_context *ctx = rdev->mode_info.atom_context;
+	struct radeon_gpio_rec gpio;
+	int index = GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT);
+	struct _ATOM_GPIO_PIN_LUT *gpio_info;
+	ATOM_GPIO_PIN_ASSIGNMENT *pin;
+	u16 data_offset, size;
+	int i, num_indices;
+
+	memset(&gpio, 0, sizeof(struct radeon_gpio_rec));
+	gpio.valid = false;
+
+	atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset);
+
+	gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);
+
+	num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
+
+	for (i = 0; i < num_indices; i++) {
+		pin = &gpio_info->asGPIO_Pin[i];
+		if (id == pin->ucGPIO_ID) {
+			gpio.id = pin->ucGPIO_ID;
+			gpio.reg = pin->usGpioPin_AIndex * 4;
+			gpio.mask = (1 << pin->ucGpioPinBitShift);
+			gpio.valid = true;
+			break;
+		}
+	}
+
+	return gpio;
+}
+
+static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device *rdev,
+							    struct radeon_gpio_rec *gpio)
+{
+	struct radeon_hpd hpd;
+	hpd.gpio = *gpio;
+	if (gpio->reg == AVIVO_DC_GPIO_HPD_A) {
+		switch (gpio->mask) {
+		case (1 << 0):
+			hpd.hpd = RADEON_HPD_1;
+			break;
+		case (1 << 8):
+			hpd.hpd = RADEON_HPD_2;
+			break;
+		case (1 << 16):
+			hpd.hpd = RADEON_HPD_3;
+			break;
+		case (1 << 24):
+			hpd.hpd = RADEON_HPD_4;
+			break;
+		case (1 << 26):
+			hpd.hpd = RADEON_HPD_5;
+			break;
+		case (1 << 28):
+			hpd.hpd = RADEON_HPD_6;
+			break;
+		default:
+			hpd.hpd = RADEON_HPD_NONE;
+			break;
+		}
+	} else
+		hpd.hpd = RADEON_HPD_NONE;
+	return hpd;
+}
+
 static bool radeon_atom_apply_quirks(struct drm_device *dev,
 				     uint32_t supported_device,
 				     int *connector_type,
 				     struct radeon_i2c_bus_rec *i2c_bus,
-				     uint16_t *line_mux)
+				     uint16_t *line_mux,
+				     struct radeon_hpd *hpd)
 {
 
 	/* Asus M2A-VM HDMI board lists the DVI port as HDMI */
@@ -135,6 +223,22 @@
 		}
 	}
 
+	/* HIS X1300 is DVI+VGA, not DVI+DVI */
+	if ((dev->pdev->device == 0x7146) &&
+	    (dev->pdev->subsystem_vendor == 0x17af) &&
+	    (dev->pdev->subsystem_device == 0x2058)) {
+		if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
+			return false;
+	}
+
+	/* Gigabyte X1300 is DVI+VGA, not DVI+DVI */
+	if ((dev->pdev->device == 0x7142) &&
+	    (dev->pdev->subsystem_vendor == 0x1458) &&
+	    (dev->pdev->subsystem_device == 0x2134)) {
+		if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
+			return false;
+	}
+
 	/* Funky macbooks */
 	if ((dev->pdev->device == 0x71C5) &&
 	    (dev->pdev->subsystem_vendor == 0x106b) &&
@@ -172,6 +277,15 @@
 		}
 	}
 
+	/* Acer laptop reports DVI-D as DVI-I */
+	if ((dev->pdev->device == 0x95c4) &&
+	    (dev->pdev->subsystem_vendor == 0x1025) &&
+	    (dev->pdev->subsystem_device == 0x013c)) {
+		if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
+		    (supported_device == ATOM_DEVICE_DFP1_SUPPORT))
+			*connector_type = DRM_MODE_CONNECTOR_DVID;
+	}
+
 	return true;
 }
 
@@ -240,16 +354,18 @@
 	struct radeon_mode_info *mode_info = &rdev->mode_info;
 	struct atom_context *ctx = mode_info->atom_context;
 	int index = GetIndexIntoMasterTable(DATA, Object_Header);
-	uint16_t size, data_offset;
-	uint8_t frev, crev, line_mux = 0;
+	u16 size, data_offset;
+	u8 frev, crev;
 	ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
 	ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
 	ATOM_OBJECT_HEADER *obj_header;
 	int i, j, path_size, device_support;
 	int connector_type;
-	uint16_t igp_lane_info, conn_id, connector_object_id;
+	u16 igp_lane_info, conn_id, connector_object_id;
 	bool linkb;
 	struct radeon_i2c_bus_rec ddc_bus;
+	struct radeon_gpio_rec gpio;
+	struct radeon_hpd hpd;
 
 	atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
 
@@ -276,7 +392,6 @@
 		path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
 		path_size += le16_to_cpu(path->usSize);
 		linkb = false;
-
 		if (device_support & le16_to_cpu(path->usDeviceTag)) {
 			uint8_t con_obj_id, con_obj_num, con_obj_type;
 
@@ -377,10 +492,9 @@
 				}
 			}
 
-			/* look up gpio for ddc */
+			/* look up gpio for ddc, hpd */
 			if ((le16_to_cpu(path->usDeviceTag) &
-			     (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
-			    == 0) {
+			     (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
 				for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
 					if (le16_to_cpu(path->usConnObjectId) ==
 					    le16_to_cpu(con_obj->asObjects[j].
@@ -394,21 +508,34 @@
 								 asObjects[j].
 								 usRecordOffset));
 						ATOM_I2C_RECORD *i2c_record;
+						ATOM_HPD_INT_RECORD *hpd_record;
+						ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
+						hpd.hpd = RADEON_HPD_NONE;
 
 						while (record->ucRecordType > 0
 						       && record->
 						       ucRecordType <=
 						       ATOM_MAX_OBJECT_RECORD_NUMBER) {
-							switch (record->
-								ucRecordType) {
+							switch (record->ucRecordType) {
 							case ATOM_I2C_RECORD_TYPE:
 								i2c_record =
-								    (ATOM_I2C_RECORD
-								     *) record;
-								line_mux =
-								    i2c_record->
-								    sucI2cId.
-								    bfI2C_LineMux;
+								    (ATOM_I2C_RECORD *)
+									record;
+								i2c_config =
+									(ATOM_I2C_ID_CONFIG_ACCESS *)
+									&i2c_record->sucI2cId;
+								ddc_bus = radeon_lookup_i2c_gpio(rdev,
+												 i2c_config->
+												 ucAccess);
+								break;
+							case ATOM_HPD_INT_RECORD_TYPE:
+								hpd_record =
+									(ATOM_HPD_INT_RECORD *)
+									record;
+								gpio = radeon_lookup_gpio(rdev,
+											  hpd_record->ucHPDIntGPIOID);
+								hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
+								hpd.plugged_state = hpd_record->ucPlugged_PinState;
 								break;
 							}
 							record =
@@ -421,24 +548,16 @@
 						break;
 					}
 				}
-			} else
-				line_mux = 0;
-
-			if ((le16_to_cpu(path->usDeviceTag) ==
-			     ATOM_DEVICE_TV1_SUPPORT)
-			    || (le16_to_cpu(path->usDeviceTag) ==
-				ATOM_DEVICE_TV2_SUPPORT)
-			    || (le16_to_cpu(path->usDeviceTag) ==
-				ATOM_DEVICE_CV_SUPPORT))
+			} else {
+				hpd.hpd = RADEON_HPD_NONE;
 				ddc_bus.valid = false;
-			else
-				ddc_bus = radeon_lookup_gpio(dev, line_mux);
+			}
 
 			conn_id = le16_to_cpu(path->usConnObjectId);
 
 			if (!radeon_atom_apply_quirks
 			    (dev, le16_to_cpu(path->usDeviceTag), &connector_type,
-			     &ddc_bus, &conn_id))
+			     &ddc_bus, &conn_id, &hpd))
 				continue;
 
 			radeon_add_atom_connector(dev,
@@ -447,7 +566,8 @@
 							      usDeviceTag),
 						  connector_type, &ddc_bus,
 						  linkb, igp_lane_info,
-						  connector_object_id);
+						  connector_object_id,
+						  &hpd);
 
 		}
 	}
@@ -502,6 +622,7 @@
 	uint16_t devices;
 	int connector_type;
 	struct radeon_i2c_bus_rec ddc_bus;
+	struct radeon_hpd hpd;
 };
 
 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
@@ -517,7 +638,7 @@
 	uint16_t device_support;
 	uint8_t dac;
 	union atom_supported_devices *supported_devices;
-	int i, j;
+	int i, j, max_device;
 	struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
 
 	atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
@@ -527,7 +648,12 @@
 
 	device_support = le16_to_cpu(supported_devices->info.usDeviceSupport);
 
-	for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+	if (frev > 1)
+		max_device = ATOM_MAX_SUPPORTED_DEVICE;
+	else
+		max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
+
+	for (i = 0; i < max_device; i++) {
 		ATOM_CONNECTOR_INFO_I2C ci =
 		    supported_devices->info.asConnInfo[i];
 
@@ -553,22 +679,8 @@
 
 		dac = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC;
 
-		if ((rdev->family == CHIP_RS690) ||
-		    (rdev->family == CHIP_RS740)) {
-			if ((i == ATOM_DEVICE_DFP2_INDEX)
-			    && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 2))
-				bios_connectors[i].line_mux =
-				    ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
-			else if ((i == ATOM_DEVICE_DFP3_INDEX)
-				 && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 1))
-				bios_connectors[i].line_mux =
-				    ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
-			else
-				bios_connectors[i].line_mux =
-				    ci.sucI2cId.sbfAccess.bfI2C_LineMux;
-		} else
-			bios_connectors[i].line_mux =
-			    ci.sucI2cId.sbfAccess.bfI2C_LineMux;
+		bios_connectors[i].line_mux =
+			ci.sucI2cId.ucAccess;
 
 		/* give tv unique connector ids */
 		if (i == ATOM_DEVICE_TV1_INDEX) {
@@ -582,8 +694,30 @@
 			bios_connectors[i].line_mux = 52;
 		} else
 			bios_connectors[i].ddc_bus =
-			    radeon_lookup_gpio(dev,
-					       bios_connectors[i].line_mux);
+			    radeon_lookup_i2c_gpio(rdev,
+						   bios_connectors[i].line_mux);
+
+		if ((crev > 1) && (frev > 1)) {
+			u8 isb = supported_devices->info_2d1.asIntSrcInfo[i].ucIntSrcBitmap;
+			switch (isb) {
+			case 0x4:
+				bios_connectors[i].hpd.hpd = RADEON_HPD_1;
+				break;
+			case 0xa:
+				bios_connectors[i].hpd.hpd = RADEON_HPD_2;
+				break;
+			default:
+				bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
+				break;
+			}
+		} else {
+			if (i == ATOM_DEVICE_DFP1_INDEX)
+				bios_connectors[i].hpd.hpd = RADEON_HPD_1;
+			else if (i == ATOM_DEVICE_DFP2_INDEX)
+				bios_connectors[i].hpd.hpd = RADEON_HPD_2;
+			else
+				bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
+		}
 
 		/* Always set the connector type to VGA for CRT1/CRT2. if they are
 		 * shared with a DVI port, we'll pick up the DVI connector when we
@@ -595,7 +729,8 @@
 
 		if (!radeon_atom_apply_quirks
 		    (dev, (1 << i), &bios_connectors[i].connector_type,
-		     &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux))
+		     &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux,
+		     &bios_connectors[i].hpd))
 			continue;
 
 		bios_connectors[i].valid = true;
@@ -617,9 +752,9 @@
 	}
 
 	/* combine shared connectors */
-	for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+	for (i = 0; i < max_device; i++) {
 		if (bios_connectors[i].valid) {
-			for (j = 0; j < ATOM_MAX_SUPPORTED_DEVICE; j++) {
+			for (j = 0; j < max_device; j++) {
 				if (bios_connectors[j].valid && (i != j)) {
 					if (bios_connectors[i].line_mux ==
 					    bios_connectors[j].line_mux) {
@@ -643,6 +778,10 @@
 							bios_connectors[i].
 							    connector_type =
 							    DRM_MODE_CONNECTOR_DVII;
+							if (bios_connectors[j].devices &
+							    (ATOM_DEVICE_DFP_SUPPORT))
+								bios_connectors[i].hpd =
+									bios_connectors[j].hpd;
 							bios_connectors[j].
 							    valid = false;
 						}
@@ -653,7 +792,7 @@
 	}
 
 	/* add the connectors */
-	for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+	for (i = 0; i < max_device; i++) {
 		if (bios_connectors[i].valid) {
 			uint16_t connector_object_id =
 				atombios_get_connector_object_id(dev,
@@ -666,7 +805,8 @@
 						  connector_type,
 						  &bios_connectors[i].ddc_bus,
 						  false, 0,
-						  connector_object_id);
+						  connector_object_id,
+						  &bios_connectors[i].hpd);
 		}
 	}
 
@@ -731,7 +871,8 @@
 			 * pre-DCE 3.0 r6xx hardware.  This might need to be adjusted per
 			 * family.
 			 */
-			p1pll->pll_out_min = 64800;
+			if (!radeon_new_pll)
+				p1pll->pll_out_min = 64800;
 		}
 
 		p1pll->pll_in_min =
@@ -861,6 +1002,7 @@
 	struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
 	uint8_t frev, crev;
 	struct radeon_atom_ss *ss = NULL;
+	int i;
 
 	if (id > ATOM_MAX_SS_ENTRY)
 		return NULL;
@@ -878,12 +1020,17 @@
 		if (!ss)
 			return NULL;
 
-		ss->percentage = le16_to_cpu(ss_info->asSS_Info[id].usSpreadSpectrumPercentage);
-		ss->type = ss_info->asSS_Info[id].ucSpreadSpectrumType;
-		ss->step = ss_info->asSS_Info[id].ucSS_Step;
-		ss->delay = ss_info->asSS_Info[id].ucSS_Delay;
-		ss->range = ss_info->asSS_Info[id].ucSS_Range;
-		ss->refdiv = ss_info->asSS_Info[id].ucRecommendedRef_Div;
+		for (i = 0; i < ATOM_MAX_SS_ENTRY; i++) {
+			if (ss_info->asSS_Info[i].ucSS_Id == id) {
+				ss->percentage =
+					le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
+				ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType;
+				ss->step = ss_info->asSS_Info[i].ucSS_Step;
+				ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
+				ss->range = ss_info->asSS_Info[i].ucSS_Range;
+				ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
+			}
+		}
 	}
 	return ss;
 }
@@ -901,7 +1048,7 @@
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_mode_info *mode_info = &rdev->mode_info;
 	int index = GetIndexIntoMasterTable(DATA, LVDS_Info);
-	uint16_t data_offset;
+	uint16_t data_offset, misc;
 	union lvds_info *lvds_info;
 	uint8_t frev, crev;
 	struct radeon_encoder_atom_dig *lvds = NULL;
@@ -940,6 +1087,19 @@
 		lvds->panel_pwr_delay =
 		    le16_to_cpu(lvds_info->info.usOffDelayInMs);
 		lvds->lvds_misc = lvds_info->info.ucLVDS_Misc;
+
+		misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess);
+		if (misc & ATOM_VSYNC_POLARITY)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+		if (misc & ATOM_HSYNC_POLARITY)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+		if (misc & ATOM_COMPOSITESYNC)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_CSYNC;
+		if (misc & ATOM_INTERLACE)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_INTERLACE;
+		if (misc & ATOM_DOUBLE_CLOCK_MODE)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
+
 		/* set crtc values */
 		drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
 
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 10bd50a..4ddfd4b 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -29,8 +29,8 @@
 void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
 			   unsigned sdomain, unsigned ddomain)
 {
-	struct radeon_object *dobj = NULL;
-	struct radeon_object *sobj = NULL;
+	struct radeon_bo *dobj = NULL;
+	struct radeon_bo *sobj = NULL;
 	struct radeon_fence *fence = NULL;
 	uint64_t saddr, daddr;
 	unsigned long start_jiffies;
@@ -41,19 +41,27 @@
 
 	size = bsize;
 	n = 1024;
-	r = radeon_object_create(rdev, NULL, size, true, sdomain, false, &sobj);
+	r = radeon_bo_create(rdev, NULL, size, true, sdomain, &sobj);
 	if (r) {
 		goto out_cleanup;
 	}
-	r = radeon_object_pin(sobj, sdomain, &saddr);
+	r = radeon_bo_reserve(sobj, false);
+	if (unlikely(r != 0))
+		goto out_cleanup;
+	r = radeon_bo_pin(sobj, sdomain, &saddr);
+	radeon_bo_unreserve(sobj);
 	if (r) {
 		goto out_cleanup;
 	}
-	r = radeon_object_create(rdev, NULL, size, true, ddomain, false, &dobj);
+	r = radeon_bo_create(rdev, NULL, size, true, ddomain, &dobj);
 	if (r) {
 		goto out_cleanup;
 	}
-	r = radeon_object_pin(dobj, ddomain, &daddr);
+	r = radeon_bo_reserve(dobj, false);
+	if (unlikely(r != 0))
+		goto out_cleanup;
+	r = radeon_bo_pin(dobj, ddomain, &daddr);
+	radeon_bo_unreserve(dobj);
 	if (r) {
 		goto out_cleanup;
 	}
@@ -109,12 +117,20 @@
 	}
 out_cleanup:
 	if (sobj) {
-		radeon_object_unpin(sobj);
-		radeon_object_unref(&sobj);
+		r = radeon_bo_reserve(sobj, false);
+		if (likely(r == 0)) {
+			radeon_bo_unpin(sobj);
+			radeon_bo_unreserve(sobj);
+		}
+		radeon_bo_unref(&sobj);
 	}
 	if (dobj) {
-		radeon_object_unpin(dobj);
-		radeon_object_unref(&dobj);
+		r = radeon_bo_reserve(dobj, false);
+		if (likely(r == 0)) {
+			radeon_bo_unpin(dobj);
+			radeon_bo_unreserve(dobj);
+		}
+		radeon_bo_unref(&dobj);
 	}
 	if (fence) {
 		radeon_fence_unref(&fence);
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index a813541..b062109 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -44,6 +44,10 @@
 
 	ref_div =
 	    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
+
+	if (ref_div == 0)
+		return 0;
+
 	sclk = fb_div / ref_div;
 
 	post_div = RREG32_PLL(RADEON_SCLK_CNTL) & RADEON_SCLK_SRC_SEL_MASK;
@@ -70,6 +74,10 @@
 
 	ref_div =
 	    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
+
+	if (ref_div == 0)
+		return 0;
+
 	mclk = fb_div / ref_div;
 
 	post_div = RREG32_PLL(RADEON_MCLK_CNTL) & 0x7;
@@ -98,8 +106,19 @@
 		ret = radeon_combios_get_clock_info(dev);
 
 	if (ret) {
-		if (p1pll->reference_div < 2)
-			p1pll->reference_div = 12;
+		if (p1pll->reference_div < 2) {
+			if (!ASIC_IS_AVIVO(rdev)) {
+				u32 tmp = RREG32_PLL(RADEON_PPLL_REF_DIV);
+				if (ASIC_IS_R300(rdev))
+					p1pll->reference_div =
+						(tmp & R300_PPLL_REF_DIV_ACC_MASK) >> R300_PPLL_REF_DIV_ACC_SHIFT;
+				else
+					p1pll->reference_div = tmp & RADEON_PPLL_REF_DIV_MASK;
+				if (p1pll->reference_div < 2)
+					p1pll->reference_div = 12;
+			} else
+				p1pll->reference_div = 12;
+		}
 		if (p2pll->reference_div < 2)
 			p2pll->reference_div = 12;
 		if (rdev->family < CHIP_RS600) {
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 5253cbf..c5021a3 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -50,7 +50,8 @@
 			    uint32_t supported_device,
 			    int connector_type,
 			    struct radeon_i2c_bus_rec *i2c_bus,
-			    uint16_t connector_object_id);
+			    uint16_t connector_object_id,
+			    struct radeon_hpd *hpd);
 
 /* from radeon_legacy_encoder.c */
 extern void
@@ -442,39 +443,71 @@
 
 }
 
-struct radeon_i2c_bus_rec combios_setup_i2c_bus(int ddc_line)
+static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev,
+						       int ddc_line)
 {
 	struct radeon_i2c_bus_rec i2c;
 
-	i2c.mask_clk_mask = RADEON_GPIO_EN_1;
-	i2c.mask_data_mask = RADEON_GPIO_EN_0;
-	i2c.a_clk_mask = RADEON_GPIO_A_1;
-	i2c.a_data_mask = RADEON_GPIO_A_0;
-	i2c.put_clk_mask = RADEON_GPIO_EN_1;
-	i2c.put_data_mask = RADEON_GPIO_EN_0;
-	i2c.get_clk_mask = RADEON_GPIO_Y_1;
-	i2c.get_data_mask = RADEON_GPIO_Y_0;
-	if ((ddc_line == RADEON_LCD_GPIO_MASK) ||
-	    (ddc_line == RADEON_MDGPIO_EN_REG)) {
-		i2c.mask_clk_reg = ddc_line;
-		i2c.mask_data_reg = ddc_line;
-		i2c.a_clk_reg = ddc_line;
-		i2c.a_data_reg = ddc_line;
-		i2c.put_clk_reg = ddc_line;
-		i2c.put_data_reg = ddc_line;
-		i2c.get_clk_reg = ddc_line + 4;
-		i2c.get_data_reg = ddc_line + 4;
+	if (ddc_line == RADEON_GPIOPAD_MASK) {
+		i2c.mask_clk_reg = RADEON_GPIOPAD_MASK;
+		i2c.mask_data_reg = RADEON_GPIOPAD_MASK;
+		i2c.a_clk_reg = RADEON_GPIOPAD_A;
+		i2c.a_data_reg = RADEON_GPIOPAD_A;
+		i2c.en_clk_reg = RADEON_GPIOPAD_EN;
+		i2c.en_data_reg = RADEON_GPIOPAD_EN;
+		i2c.y_clk_reg = RADEON_GPIOPAD_Y;
+		i2c.y_data_reg = RADEON_GPIOPAD_Y;
+	} else if (ddc_line == RADEON_MDGPIO_MASK) {
+		i2c.mask_clk_reg = RADEON_MDGPIO_MASK;
+		i2c.mask_data_reg = RADEON_MDGPIO_MASK;
+		i2c.a_clk_reg = RADEON_MDGPIO_A;
+		i2c.a_data_reg = RADEON_MDGPIO_A;
+		i2c.en_clk_reg = RADEON_MDGPIO_EN;
+		i2c.en_data_reg = RADEON_MDGPIO_EN;
+		i2c.y_clk_reg = RADEON_MDGPIO_Y;
+		i2c.y_data_reg = RADEON_MDGPIO_Y;
 	} else {
+		i2c.mask_clk_mask = RADEON_GPIO_EN_1;
+		i2c.mask_data_mask = RADEON_GPIO_EN_0;
+		i2c.a_clk_mask = RADEON_GPIO_A_1;
+		i2c.a_data_mask = RADEON_GPIO_A_0;
+		i2c.en_clk_mask = RADEON_GPIO_EN_1;
+		i2c.en_data_mask = RADEON_GPIO_EN_0;
+		i2c.y_clk_mask = RADEON_GPIO_Y_1;
+		i2c.y_data_mask = RADEON_GPIO_Y_0;
+
 		i2c.mask_clk_reg = ddc_line;
 		i2c.mask_data_reg = ddc_line;
 		i2c.a_clk_reg = ddc_line;
 		i2c.a_data_reg = ddc_line;
-		i2c.put_clk_reg = ddc_line;
-		i2c.put_data_reg = ddc_line;
-		i2c.get_clk_reg = ddc_line;
-		i2c.get_data_reg = ddc_line;
+		i2c.en_clk_reg = ddc_line;
+		i2c.en_data_reg = ddc_line;
+		i2c.y_clk_reg = ddc_line;
+		i2c.y_data_reg = ddc_line;
 	}
 
+	if (rdev->family < CHIP_R200)
+		i2c.hw_capable = false;
+	else {
+		switch (ddc_line) {
+		case RADEON_GPIO_VGA_DDC:
+		case RADEON_GPIO_DVI_DDC:
+			i2c.hw_capable = true;
+			break;
+		case RADEON_GPIO_MONID:
+			/* hw i2c on RADEON_GPIO_MONID doesn't seem to work
+			 * reliably on some pre-r4xx hardware; not sure why.
+			 */
+			i2c.hw_capable = false;
+			break;
+		default:
+			i2c.hw_capable = false;
+			break;
+		}
+	}
+	i2c.mm_i2c = false;
+	i2c.i2c_id = 0;
+
 	if (ddc_line)
 		i2c.valid = true;
 	else
@@ -495,7 +528,7 @@
 	uint16_t sclk, mclk;
 
 	if (rdev->bios == NULL)
-		return NULL;
+		return false;
 
 	pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE);
 	if (pll_info) {
@@ -993,8 +1026,8 @@
 	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_R420  */
 	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_R423  */
 	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_RV410 */
-	{{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}},	/* CHIP_RS400 */
-	{{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}},	/* CHIP_RS480 */
+	{ {0, 0}, {0, 0}, {0, 0}, {0, 0} },	/* CHIP_RS400 */
+	{ {0, 0}, {0, 0}, {0, 0}, {0, 0} },	/* CHIP_RS480 */
 };
 
 bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
@@ -1028,7 +1061,6 @@
 	tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
 
 	if (tmds_info) {
-
 		ver = RBIOS8(tmds_info);
 		DRM_INFO("DFP table revision: %d\n", ver);
 		if (ver == 3) {
@@ -1063,51 +1095,139 @@
 					  tmds->tmds_pll[i].value);
 			}
 		}
-	} else
+	} else {
 		DRM_INFO("No TMDS info found in BIOS\n");
+		return false;
+	}
 	return true;
 }
 
-struct radeon_encoder_int_tmds *radeon_combios_get_tmds_info(struct radeon_encoder *encoder)
-{
-	struct radeon_encoder_int_tmds *tmds = NULL;
-	bool ret;
-
-	tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL);
-
-	if (!tmds)
-		return NULL;
-
-	ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds);
-	if (ret == false)
-		radeon_legacy_get_tmds_info_from_table(encoder, tmds);
-
-	return tmds;
-}
-
-void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder)
+bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
+						struct radeon_encoder_ext_tmds *tmds)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct radeon_device *rdev = dev->dev_private;
-	uint16_t ext_tmds_info;
-	uint8_t ver;
+	struct radeon_i2c_bus_rec i2c_bus;
+
+	/* default for macs */
+	i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
+	tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+
+	/* XXX some macs have dual-link chips */
+	switch (rdev->mode_info.connector_table) {
+	case CT_POWERBOOK_EXTERNAL:
+	case CT_MINI_EXTERNAL:
+	default:
+		tmds->dvo_chip = DVO_SIL164;
+		tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
+		break;
+	}
+
+	return true;
+}
+
+bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder,
+						  struct radeon_encoder_ext_tmds *tmds)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint16_t offset;
+	uint8_t ver, id, blocks, clk, data;
+	int i;
+	enum radeon_combios_ddc gpio;
+	struct radeon_i2c_bus_rec i2c_bus;
 
 	if (rdev->bios == NULL)
-		return;
+		return false;
 
-	ext_tmds_info =
-	    combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
-	if (ext_tmds_info) {
-		ver = RBIOS8(ext_tmds_info);
-		DRM_INFO("External TMDS Table revision: %d\n", ver);
-		// TODO
+	tmds->i2c_bus = NULL;
+	if (rdev->flags & RADEON_IS_IGP) {
+		offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
+		if (offset) {
+			ver = RBIOS8(offset);
+			DRM_INFO("GPIO Table revision: %d\n", ver);
+			blocks = RBIOS8(offset + 2);
+			for (i = 0; i < blocks; i++) {
+				id = RBIOS8(offset + 3 + (i * 5) + 0);
+				if (id == 136) {
+					clk = RBIOS8(offset + 3 + (i * 5) + 3);
+					data = RBIOS8(offset + 3 + (i * 5) + 4);
+					i2c_bus.valid = true;
+					i2c_bus.mask_clk_mask = (1 << clk);
+					i2c_bus.mask_data_mask = (1 << data);
+					i2c_bus.a_clk_mask = (1 << clk);
+					i2c_bus.a_data_mask = (1 << data);
+					i2c_bus.en_clk_mask = (1 << clk);
+					i2c_bus.en_data_mask = (1 << data);
+					i2c_bus.y_clk_mask = (1 << clk);
+					i2c_bus.y_data_mask = (1 << data);
+					i2c_bus.mask_clk_reg = RADEON_GPIOPAD_MASK;
+					i2c_bus.mask_data_reg = RADEON_GPIOPAD_MASK;
+					i2c_bus.a_clk_reg = RADEON_GPIOPAD_A;
+					i2c_bus.a_data_reg = RADEON_GPIOPAD_A;
+					i2c_bus.en_clk_reg = RADEON_GPIOPAD_EN;
+					i2c_bus.en_data_reg = RADEON_GPIOPAD_EN;
+					i2c_bus.y_clk_reg = RADEON_GPIOPAD_Y;
+					i2c_bus.y_data_reg = RADEON_GPIOPAD_Y;
+					tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+					tmds->dvo_chip = DVO_SIL164;
+					tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
+					break;
+				}
+			}
+		}
+	} else {
+		offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+		if (offset) {
+			ver = RBIOS8(offset);
+			DRM_INFO("External TMDS Table revision: %d\n", ver);
+			tmds->slave_addr = RBIOS8(offset + 4 + 2);
+			tmds->slave_addr >>= 1; /* 7 bit addressing */
+			gpio = RBIOS8(offset + 4 + 3);
+			switch (gpio) {
+			case DDC_MONID:
+				i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
+				tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+				break;
+			case DDC_DVI:
+				i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+				tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+				break;
+			case DDC_VGA:
+				i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+				tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+				break;
+			case DDC_CRT2:
+				/* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */
+				if (rdev->family >= CHIP_R300)
+					i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
+				else
+					i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
+				tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+				break;
+			case DDC_LCD: /* MM i2c */
+				DRM_ERROR("MM i2c requires hw i2c engine\n");
+				break;
+			default:
+				DRM_ERROR("Unsupported gpio %d\n", gpio);
+				break;
+			}
+		}
 	}
+
+	if (!tmds->i2c_bus) {
+		DRM_INFO("No valid Ext TMDS info found in BIOS\n");
+		return false;
+	}
+
+	return true;
 }
 
 bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_i2c_bus_rec ddc_i2c;
+	struct radeon_hpd hpd;
 
 	rdev->mode_info.connector_table = radeon_connector_table;
 	if (rdev->mode_info.connector_table == CT_NONE) {
@@ -1168,7 +1288,8 @@
 		/* these are the most common settings */
 		if (rdev->flags & RADEON_SINGLE_CRTC) {
 			/* VGA - primary dac */
-			ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+			ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+			hpd.hpd = RADEON_HPD_NONE;
 			radeon_add_legacy_encoder(dev,
 						  radeon_get_encoder_id(dev,
 									ATOM_DEVICE_CRT1_SUPPORT,
@@ -1178,10 +1299,12 @@
 						    ATOM_DEVICE_CRT1_SUPPORT,
 						    DRM_MODE_CONNECTOR_VGA,
 						    &ddc_i2c,
-						    CONNECTOR_OBJECT_ID_VGA);
+						    CONNECTOR_OBJECT_ID_VGA,
+						    &hpd);
 		} else if (rdev->flags & RADEON_IS_MOBILITY) {
 			/* LVDS */
-			ddc_i2c = combios_setup_i2c_bus(RADEON_LCD_GPIO_MASK);
+			ddc_i2c = combios_setup_i2c_bus(rdev, 0);
+			hpd.hpd = RADEON_HPD_NONE;
 			radeon_add_legacy_encoder(dev,
 						  radeon_get_encoder_id(dev,
 									ATOM_DEVICE_LCD1_SUPPORT,
@@ -1191,10 +1314,12 @@
 						    ATOM_DEVICE_LCD1_SUPPORT,
 						    DRM_MODE_CONNECTOR_LVDS,
 						    &ddc_i2c,
-						    CONNECTOR_OBJECT_ID_LVDS);
+						    CONNECTOR_OBJECT_ID_LVDS,
+						    &hpd);
 
 			/* VGA - primary dac */
-			ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+			ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+			hpd.hpd = RADEON_HPD_NONE;
 			radeon_add_legacy_encoder(dev,
 						  radeon_get_encoder_id(dev,
 									ATOM_DEVICE_CRT1_SUPPORT,
@@ -1204,10 +1329,12 @@
 						    ATOM_DEVICE_CRT1_SUPPORT,
 						    DRM_MODE_CONNECTOR_VGA,
 						    &ddc_i2c,
-						    CONNECTOR_OBJECT_ID_VGA);
+						    CONNECTOR_OBJECT_ID_VGA,
+						    &hpd);
 		} else {
 			/* DVI-I - tv dac, int tmds */
-			ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+			ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+			hpd.hpd = RADEON_HPD_1;
 			radeon_add_legacy_encoder(dev,
 						  radeon_get_encoder_id(dev,
 									ATOM_DEVICE_DFP1_SUPPORT,
@@ -1223,10 +1350,12 @@
 						    ATOM_DEVICE_CRT2_SUPPORT,
 						    DRM_MODE_CONNECTOR_DVII,
 						    &ddc_i2c,
-						    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
+						    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+						    &hpd);
 
 			/* VGA - primary dac */
-			ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+			ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+			hpd.hpd = RADEON_HPD_NONE;
 			radeon_add_legacy_encoder(dev,
 						  radeon_get_encoder_id(dev,
 									ATOM_DEVICE_CRT1_SUPPORT,
@@ -1236,11 +1365,14 @@
 						    ATOM_DEVICE_CRT1_SUPPORT,
 						    DRM_MODE_CONNECTOR_VGA,
 						    &ddc_i2c,
-						    CONNECTOR_OBJECT_ID_VGA);
+						    CONNECTOR_OBJECT_ID_VGA,
+						    &hpd);
 		}
 
 		if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) {
 			/* TV - tv dac */
+			ddc_i2c.valid = false;
+			hpd.hpd = RADEON_HPD_NONE;
 			radeon_add_legacy_encoder(dev,
 						  radeon_get_encoder_id(dev,
 									ATOM_DEVICE_TV1_SUPPORT,
@@ -1250,14 +1382,16 @@
 						    ATOM_DEVICE_TV1_SUPPORT,
 						    DRM_MODE_CONNECTOR_SVIDEO,
 						    &ddc_i2c,
-						    CONNECTOR_OBJECT_ID_SVIDEO);
+						    CONNECTOR_OBJECT_ID_SVIDEO,
+						    &hpd);
 		}
 		break;
 	case CT_IBOOK:
 		DRM_INFO("Connector Table: %d (ibook)\n",
 			 rdev->mode_info.connector_table);
 		/* LVDS */
-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
 					  radeon_get_encoder_id(dev,
 								ATOM_DEVICE_LCD1_SUPPORT,
@@ -1265,9 +1399,11 @@
 					  ATOM_DEVICE_LCD1_SUPPORT);
 		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
 					    DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
-					    CONNECTOR_OBJECT_ID_LVDS);
+					    CONNECTOR_OBJECT_ID_LVDS,
+					    &hpd);
 		/* VGA - TV DAC */
-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
 					  radeon_get_encoder_id(dev,
 								ATOM_DEVICE_CRT2_SUPPORT,
@@ -1275,8 +1411,11 @@
 					  ATOM_DEVICE_CRT2_SUPPORT);
 		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
 					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
-					    CONNECTOR_OBJECT_ID_VGA);
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
 		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
 					  radeon_get_encoder_id(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
@@ -1285,13 +1424,15 @@
 		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
 					    DRM_MODE_CONNECTOR_SVIDEO,
 					    &ddc_i2c,
-					    CONNECTOR_OBJECT_ID_SVIDEO);
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
 		break;
 	case CT_POWERBOOK_EXTERNAL:
 		DRM_INFO("Connector Table: %d (powerbook external tmds)\n",
 			 rdev->mode_info.connector_table);
 		/* LVDS */
-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
 					  radeon_get_encoder_id(dev,
 								ATOM_DEVICE_LCD1_SUPPORT,
@@ -1299,9 +1440,11 @@
 					  ATOM_DEVICE_LCD1_SUPPORT);
 		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
 					    DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
-					    CONNECTOR_OBJECT_ID_LVDS);
+					    CONNECTOR_OBJECT_ID_LVDS,
+					    &hpd);
 		/* DVI-I - primary dac, ext tmds */
-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+		hpd.hpd = RADEON_HPD_2; /* ??? */
 		radeon_add_legacy_encoder(dev,
 					  radeon_get_encoder_id(dev,
 								ATOM_DEVICE_DFP2_SUPPORT,
@@ -1317,8 +1460,11 @@
 					    ATOM_DEVICE_DFP2_SUPPORT |
 					    ATOM_DEVICE_CRT1_SUPPORT,
 					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
-					    CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I);
+					    CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
+					    &hpd);
 		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
 					  radeon_get_encoder_id(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
@@ -1327,13 +1473,15 @@
 		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
 					    DRM_MODE_CONNECTOR_SVIDEO,
 					    &ddc_i2c,
-					    CONNECTOR_OBJECT_ID_SVIDEO);
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
 		break;
 	case CT_POWERBOOK_INTERNAL:
 		DRM_INFO("Connector Table: %d (powerbook internal tmds)\n",
 			 rdev->mode_info.connector_table);
 		/* LVDS */
-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
 					  radeon_get_encoder_id(dev,
 								ATOM_DEVICE_LCD1_SUPPORT,
@@ -1341,9 +1489,11 @@
 					  ATOM_DEVICE_LCD1_SUPPORT);
 		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
 					    DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
-					    CONNECTOR_OBJECT_ID_LVDS);
+					    CONNECTOR_OBJECT_ID_LVDS,
+					    &hpd);
 		/* DVI-I - primary dac, int tmds */
-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
 		radeon_add_legacy_encoder(dev,
 					  radeon_get_encoder_id(dev,
 								ATOM_DEVICE_DFP1_SUPPORT,
@@ -1358,8 +1508,11 @@
 					    ATOM_DEVICE_DFP1_SUPPORT |
 					    ATOM_DEVICE_CRT1_SUPPORT,
 					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
-					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
 		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
 					  radeon_get_encoder_id(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
@@ -1368,13 +1521,15 @@
 		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
 					    DRM_MODE_CONNECTOR_SVIDEO,
 					    &ddc_i2c,
-					    CONNECTOR_OBJECT_ID_SVIDEO);
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
 		break;
 	case CT_POWERBOOK_VGA:
 		DRM_INFO("Connector Table: %d (powerbook vga)\n",
 			 rdev->mode_info.connector_table);
 		/* LVDS */
-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
 					  radeon_get_encoder_id(dev,
 								ATOM_DEVICE_LCD1_SUPPORT,
@@ -1382,9 +1537,11 @@
 					  ATOM_DEVICE_LCD1_SUPPORT);
 		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
 					    DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
-					    CONNECTOR_OBJECT_ID_LVDS);
+					    CONNECTOR_OBJECT_ID_LVDS,
+					    &hpd);
 		/* VGA - primary dac */
-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
 					  radeon_get_encoder_id(dev,
 								ATOM_DEVICE_CRT1_SUPPORT,
@@ -1392,8 +1549,11 @@
 					  ATOM_DEVICE_CRT1_SUPPORT);
 		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
 					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
-					    CONNECTOR_OBJECT_ID_VGA);
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
 		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
 					  radeon_get_encoder_id(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
@@ -1402,13 +1562,15 @@
 		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
 					    DRM_MODE_CONNECTOR_SVIDEO,
 					    &ddc_i2c,
-					    CONNECTOR_OBJECT_ID_SVIDEO);
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
 		break;
 	case CT_MINI_EXTERNAL:
 		DRM_INFO("Connector Table: %d (mini external tmds)\n",
 			 rdev->mode_info.connector_table);
 		/* DVI-I - tv dac, ext tmds */
-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
+		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
+		hpd.hpd = RADEON_HPD_2; /* ??? */
 		radeon_add_legacy_encoder(dev,
 					  radeon_get_encoder_id(dev,
 								ATOM_DEVICE_DFP2_SUPPORT,
@@ -1424,8 +1586,11 @@
 					    ATOM_DEVICE_DFP2_SUPPORT |
 					    ATOM_DEVICE_CRT2_SUPPORT,
 					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
-					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
 		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
 					  radeon_get_encoder_id(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
@@ -1434,13 +1599,15 @@
 		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
 					    DRM_MODE_CONNECTOR_SVIDEO,
 					    &ddc_i2c,
-					    CONNECTOR_OBJECT_ID_SVIDEO);
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
 		break;
 	case CT_MINI_INTERNAL:
 		DRM_INFO("Connector Table: %d (mini internal tmds)\n",
 			 rdev->mode_info.connector_table);
 		/* DVI-I - tv dac, int tmds */
-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
+		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
 		radeon_add_legacy_encoder(dev,
 					  radeon_get_encoder_id(dev,
 								ATOM_DEVICE_DFP1_SUPPORT,
@@ -1455,8 +1622,11 @@
 					    ATOM_DEVICE_DFP1_SUPPORT |
 					    ATOM_DEVICE_CRT2_SUPPORT,
 					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
-					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
 		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
 					  radeon_get_encoder_id(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
@@ -1465,13 +1635,15 @@
 		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
 					    DRM_MODE_CONNECTOR_SVIDEO,
 					    &ddc_i2c,
-					    CONNECTOR_OBJECT_ID_SVIDEO);
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
 		break;
 	case CT_IMAC_G5_ISIGHT:
 		DRM_INFO("Connector Table: %d (imac g5 isight)\n",
 			 rdev->mode_info.connector_table);
 		/* DVI-D - int tmds */
-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID);
+		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
 		radeon_add_legacy_encoder(dev,
 					  radeon_get_encoder_id(dev,
 								ATOM_DEVICE_DFP1_SUPPORT,
@@ -1479,9 +1651,11 @@
 					  ATOM_DEVICE_DFP1_SUPPORT);
 		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT,
 					    DRM_MODE_CONNECTOR_DVID, &ddc_i2c,
-					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D);
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D,
+					    &hpd);
 		/* VGA - tv dac */
-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
 					  radeon_get_encoder_id(dev,
 								ATOM_DEVICE_CRT2_SUPPORT,
@@ -1489,8 +1663,11 @@
 					  ATOM_DEVICE_CRT2_SUPPORT);
 		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
 					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
-					    CONNECTOR_OBJECT_ID_VGA);
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
 		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
 					  radeon_get_encoder_id(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
@@ -1499,13 +1676,15 @@
 		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
 					    DRM_MODE_CONNECTOR_SVIDEO,
 					    &ddc_i2c,
-					    CONNECTOR_OBJECT_ID_SVIDEO);
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
 		break;
 	case CT_EMAC:
 		DRM_INFO("Connector Table: %d (emac)\n",
 			 rdev->mode_info.connector_table);
 		/* VGA - primary dac */
-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
 					  radeon_get_encoder_id(dev,
 								ATOM_DEVICE_CRT1_SUPPORT,
@@ -1513,9 +1692,11 @@
 					  ATOM_DEVICE_CRT1_SUPPORT);
 		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
 					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
-					    CONNECTOR_OBJECT_ID_VGA);
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
 		/* VGA - tv dac */
-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
+		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
+		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
 					  radeon_get_encoder_id(dev,
 								ATOM_DEVICE_CRT2_SUPPORT,
@@ -1523,8 +1704,11 @@
 					  ATOM_DEVICE_CRT2_SUPPORT);
 		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
 					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
-					    CONNECTOR_OBJECT_ID_VGA);
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
 		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
 					  radeon_get_encoder_id(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
@@ -1533,7 +1717,8 @@
 		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
 					    DRM_MODE_CONNECTOR_SVIDEO,
 					    &ddc_i2c,
-					    CONNECTOR_OBJECT_ID_SVIDEO);
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
 		break;
 	default:
 		DRM_INFO("Connector table: %d (invalid)\n",
@@ -1550,7 +1735,8 @@
 				       int bios_index,
 				       enum radeon_combios_connector
 				       *legacy_connector,
-				       struct radeon_i2c_bus_rec *ddc_i2c)
+				       struct radeon_i2c_bus_rec *ddc_i2c,
+				       struct radeon_hpd *hpd)
 {
 	struct radeon_device *rdev = dev->dev_private;
 
@@ -1558,29 +1744,26 @@
 	if ((rdev->family == CHIP_RS400 ||
 	     rdev->family == CHIP_RS480) &&
 	    ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
-		*ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID);
+		*ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
 	else if ((rdev->family == CHIP_RS400 ||
 		  rdev->family == CHIP_RS480) &&
 		 ddc_i2c->mask_clk_reg == RADEON_GPIO_MONID) {
-		ddc_i2c->valid = true;
+		*ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIOPAD_MASK);
 		ddc_i2c->mask_clk_mask = (0x20 << 8);
 		ddc_i2c->mask_data_mask = 0x80;
 		ddc_i2c->a_clk_mask = (0x20 << 8);
 		ddc_i2c->a_data_mask = 0x80;
-		ddc_i2c->put_clk_mask = (0x20 << 8);
-		ddc_i2c->put_data_mask = 0x80;
-		ddc_i2c->get_clk_mask = (0x20 << 8);
-		ddc_i2c->get_data_mask = 0x80;
-		ddc_i2c->mask_clk_reg = RADEON_GPIOPAD_MASK;
-		ddc_i2c->mask_data_reg = RADEON_GPIOPAD_MASK;
-		ddc_i2c->a_clk_reg = RADEON_GPIOPAD_A;
-		ddc_i2c->a_data_reg = RADEON_GPIOPAD_A;
-		ddc_i2c->put_clk_reg = RADEON_GPIOPAD_EN;
-		ddc_i2c->put_data_reg = RADEON_GPIOPAD_EN;
-		ddc_i2c->get_clk_reg = RADEON_LCD_GPIO_Y_REG;
-		ddc_i2c->get_data_reg = RADEON_LCD_GPIO_Y_REG;
+		ddc_i2c->en_clk_mask = (0x20 << 8);
+		ddc_i2c->en_data_mask = 0x80;
+		ddc_i2c->y_clk_mask = (0x20 << 8);
+		ddc_i2c->y_data_mask = 0x80;
 	}
 
+	/* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */
+	if ((rdev->family >= CHIP_R300) &&
+	    ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
+		*ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+
 	/* Certain IBM chipset RN50s have a BIOS reporting two VGAs,
 	   one with VGA DDC and one with CRT2 DDC. - kill the CRT2 DDC one */
 	if (dev->pdev->device == 0x515e &&
@@ -1624,6 +1807,12 @@
 	    dev->pdev->subsystem_device == 0x280a)
 		return false;
 
+	/* MSI S270 has non-existent TV port */
+	if (dev->pdev->device == 0x5955 &&
+	    dev->pdev->subsystem_vendor == 0x1462 &&
+	    dev->pdev->subsystem_device == 0x0131)
+		return false;
+
 	return true;
 }
 
@@ -1671,6 +1860,7 @@
 	enum radeon_combios_connector connector;
 	int i = 0;
 	struct radeon_i2c_bus_rec ddc_i2c;
+	struct radeon_hpd hpd;
 
 	if (rdev->bios == NULL)
 		return false;
@@ -1691,26 +1881,40 @@
 			switch (ddc_type) {
 			case DDC_MONID:
 				ddc_i2c =
-				    combios_setup_i2c_bus(RADEON_GPIO_MONID);
+					combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
 				break;
 			case DDC_DVI:
 				ddc_i2c =
-				    combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+					combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
 				break;
 			case DDC_VGA:
 				ddc_i2c =
-				    combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+					combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
 				break;
 			case DDC_CRT2:
 				ddc_i2c =
-				    combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
+					combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
 				break;
 			default:
 				break;
 			}
 
+			switch (connector) {
+			case CONNECTOR_PROPRIETARY_LEGACY:
+			case CONNECTOR_DVI_I_LEGACY:
+			case CONNECTOR_DVI_D_LEGACY:
+				if ((tmp >> 4) & 0x1)
+					hpd.hpd = RADEON_HPD_2;
+				else
+					hpd.hpd = RADEON_HPD_1;
+				break;
+			default:
+				hpd.hpd = RADEON_HPD_NONE;
+				break;
+			}
+
 			if (!radeon_apply_legacy_quirks(dev, i, &connector,
-						       &ddc_i2c))
+							&ddc_i2c, &hpd))
 				continue;
 
 			switch (connector) {
@@ -1727,7 +1931,8 @@
 							    legacy_connector_convert
 							    [connector],
 							    &ddc_i2c,
-							    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D);
+							    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D,
+							    &hpd);
 				break;
 			case CONNECTOR_CRT_LEGACY:
 				if (tmp & 0x1) {
@@ -1753,7 +1958,8 @@
 							    legacy_connector_convert
 							    [connector],
 							    &ddc_i2c,
-							    CONNECTOR_OBJECT_ID_VGA);
+							    CONNECTOR_OBJECT_ID_VGA,
+							    &hpd);
 				break;
 			case CONNECTOR_DVI_I_LEGACY:
 				devices = 0;
@@ -1799,7 +2005,8 @@
 							    legacy_connector_convert
 							    [connector],
 							    &ddc_i2c,
-							    connector_object_id);
+							    connector_object_id,
+							    &hpd);
 				break;
 			case CONNECTOR_DVI_D_LEGACY:
 				if ((tmp >> 4) & 0x1) {
@@ -1817,7 +2024,8 @@
 							    legacy_connector_convert
 							    [connector],
 							    &ddc_i2c,
-							    connector_object_id);
+							    connector_object_id,
+							    &hpd);
 				break;
 			case CONNECTOR_CTV_LEGACY:
 			case CONNECTOR_STV_LEGACY:
@@ -1832,7 +2040,8 @@
 							    legacy_connector_convert
 							    [connector],
 							    &ddc_i2c,
-							    CONNECTOR_OBJECT_ID_SVIDEO);
+							    CONNECTOR_OBJECT_ID_SVIDEO,
+							    &hpd);
 				break;
 			default:
 				DRM_ERROR("Unknown connector type: %d\n",
@@ -1858,14 +2067,16 @@
 									0),
 						  ATOM_DEVICE_DFP1_SUPPORT);
 
-			ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+			ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+			hpd.hpd = RADEON_HPD_NONE;
 			radeon_add_legacy_connector(dev,
 						    0,
 						    ATOM_DEVICE_CRT1_SUPPORT |
 						    ATOM_DEVICE_DFP1_SUPPORT,
 						    DRM_MODE_CONNECTOR_DVII,
 						    &ddc_i2c,
-						    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
+						    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+						    &hpd);
 		} else {
 			uint16_t crt_info =
 				combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
@@ -1876,13 +2087,15 @@
 										ATOM_DEVICE_CRT1_SUPPORT,
 										1),
 							  ATOM_DEVICE_CRT1_SUPPORT);
-				ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+				ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+				hpd.hpd = RADEON_HPD_NONE;
 				radeon_add_legacy_connector(dev,
 							    0,
 							    ATOM_DEVICE_CRT1_SUPPORT,
 							    DRM_MODE_CONNECTOR_VGA,
 							    &ddc_i2c,
-							    CONNECTOR_OBJECT_ID_VGA);
+							    CONNECTOR_OBJECT_ID_VGA,
+							    &hpd);
 			} else {
 				DRM_DEBUG("No connector info found\n");
 				return false;
@@ -1910,27 +2123,27 @@
 				case DDC_MONID:
 					ddc_i2c =
 					    combios_setup_i2c_bus
-					    (RADEON_GPIO_MONID);
+						(rdev, RADEON_GPIO_MONID);
 					break;
 				case DDC_DVI:
 					ddc_i2c =
 					    combios_setup_i2c_bus
-					    (RADEON_GPIO_DVI_DDC);
+						(rdev, RADEON_GPIO_DVI_DDC);
 					break;
 				case DDC_VGA:
 					ddc_i2c =
 					    combios_setup_i2c_bus
-					    (RADEON_GPIO_VGA_DDC);
+						(rdev, RADEON_GPIO_VGA_DDC);
 					break;
 				case DDC_CRT2:
 					ddc_i2c =
 					    combios_setup_i2c_bus
-					    (RADEON_GPIO_CRT2_DDC);
+						(rdev, RADEON_GPIO_CRT2_DDC);
 					break;
 				case DDC_LCD:
 					ddc_i2c =
 					    combios_setup_i2c_bus
-					    (RADEON_LCD_GPIO_MASK);
+						(rdev, RADEON_GPIOPAD_MASK);
 					ddc_i2c.mask_clk_mask =
 					    RBIOS32(lcd_ddc_info + 3);
 					ddc_i2c.mask_data_mask =
@@ -1939,19 +2152,19 @@
 					    RBIOS32(lcd_ddc_info + 3);
 					ddc_i2c.a_data_mask =
 					    RBIOS32(lcd_ddc_info + 7);
-					ddc_i2c.put_clk_mask =
+					ddc_i2c.en_clk_mask =
 					    RBIOS32(lcd_ddc_info + 3);
-					ddc_i2c.put_data_mask =
+					ddc_i2c.en_data_mask =
 					    RBIOS32(lcd_ddc_info + 7);
-					ddc_i2c.get_clk_mask =
+					ddc_i2c.y_clk_mask =
 					    RBIOS32(lcd_ddc_info + 3);
-					ddc_i2c.get_data_mask =
+					ddc_i2c.y_data_mask =
 					    RBIOS32(lcd_ddc_info + 7);
 					break;
 				case DDC_GPIO:
 					ddc_i2c =
 					    combios_setup_i2c_bus
-					    (RADEON_MDGPIO_EN_REG);
+						(rdev, RADEON_MDGPIO_MASK);
 					ddc_i2c.mask_clk_mask =
 					    RBIOS32(lcd_ddc_info + 3);
 					ddc_i2c.mask_data_mask =
@@ -1960,13 +2173,13 @@
 					    RBIOS32(lcd_ddc_info + 3);
 					ddc_i2c.a_data_mask =
 					    RBIOS32(lcd_ddc_info + 7);
-					ddc_i2c.put_clk_mask =
+					ddc_i2c.en_clk_mask =
 					    RBIOS32(lcd_ddc_info + 3);
-					ddc_i2c.put_data_mask =
+					ddc_i2c.en_data_mask =
 					    RBIOS32(lcd_ddc_info + 7);
-					ddc_i2c.get_clk_mask =
+					ddc_i2c.y_clk_mask =
 					    RBIOS32(lcd_ddc_info + 3);
-					ddc_i2c.get_data_mask =
+					ddc_i2c.y_data_mask =
 					    RBIOS32(lcd_ddc_info + 7);
 					break;
 				default:
@@ -1977,12 +2190,14 @@
 			} else
 				ddc_i2c.valid = false;
 
+			hpd.hpd = RADEON_HPD_NONE;
 			radeon_add_legacy_connector(dev,
 						    5,
 						    ATOM_DEVICE_LCD1_SUPPORT,
 						    DRM_MODE_CONNECTOR_LVDS,
 						    &ddc_i2c,
-						    CONNECTOR_OBJECT_ID_LVDS);
+						    CONNECTOR_OBJECT_ID_LVDS,
+						    &hpd);
 		}
 	}
 
@@ -1993,6 +2208,7 @@
 		if (tv_info) {
 			if (RBIOS8(tv_info + 6) == 'T') {
 				if (radeon_apply_legacy_tv_quirks(dev)) {
+					hpd.hpd = RADEON_HPD_NONE;
 					radeon_add_legacy_encoder(dev,
 								  radeon_get_encoder_id
 								  (dev,
@@ -2003,7 +2219,8 @@
 								    ATOM_DEVICE_TV1_SUPPORT,
 								    DRM_MODE_CONNECTOR_SVIDEO,
 								    &ddc_i2c,
-								    CONNECTOR_OBJECT_ID_SVIDEO);
+								    CONNECTOR_OBJECT_ID_SVIDEO,
+								    &hpd);
 				}
 			}
 		}
@@ -2014,6 +2231,193 @@
 	return true;
 }
 
+void radeon_external_tmds_setup(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+
+	if (!tmds)
+		return;
+
+	switch (tmds->dvo_chip) {
+	case DVO_SIL164:
+		/* sil 164 */
+		radeon_i2c_do_lock(tmds->i2c_bus, 1);
+		radeon_i2c_sw_put_byte(tmds->i2c_bus,
+				       tmds->slave_addr,
+				       0x08, 0x30);
+		radeon_i2c_sw_put_byte(tmds->i2c_bus,
+				       tmds->slave_addr,
+				       0x09, 0x00);
+		radeon_i2c_sw_put_byte(tmds->i2c_bus,
+				       tmds->slave_addr,
+				       0x0a, 0x90);
+		radeon_i2c_sw_put_byte(tmds->i2c_bus,
+				       tmds->slave_addr,
+				       0x0c, 0x89);
+		radeon_i2c_sw_put_byte(tmds->i2c_bus,
+				       tmds->slave_addr,
+				       0x08, 0x3b);
+		radeon_i2c_do_lock(tmds->i2c_bus, 0);
+		break;
+	case DVO_SIL1178:
+		/* sil 1178 - untested */
+		/*
+		 * 0x0f, 0x44
+		 * 0x0f, 0x4c
+		 * 0x0e, 0x01
+		 * 0x0a, 0x80
+		 * 0x09, 0x30
+		 * 0x0c, 0xc9
+		 * 0x0d, 0x70
+		 * 0x08, 0x32
+		 * 0x08, 0x33
+		 */
+		break;
+	default:
+		break;
+	}
+
+}
+
+bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint16_t offset;
+	uint8_t blocks, slave_addr, rev;
+	uint32_t index, id;
+	uint32_t reg, val, and_mask, or_mask;
+	struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+
+	if (rdev->bios == NULL)
+		return false;
+
+	if (!tmds)
+		return false;
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		offset = combios_get_table_offset(dev, COMBIOS_TMDS_POWER_ON_TABLE);
+		if (offset) {
+			rev = RBIOS8(offset);
+			if (rev > 1) {
+				blocks = RBIOS8(offset + 3);
+				index = offset + 4;
+				while (blocks > 0) {
+					id = RBIOS16(index);
+					index += 2;
+					switch (id >> 13) {
+					case 0:
+						reg = (id & 0x1fff) * 4;
+						val = RBIOS32(index);
+						index += 4;
+						WREG32(reg, val);
+						break;
+					case 2:
+						reg = (id & 0x1fff) * 4;
+						and_mask = RBIOS32(index);
+						index += 4;
+						or_mask = RBIOS32(index);
+						index += 4;
+						val = RREG32(reg);
+						val = (val & and_mask) | or_mask;
+						WREG32(reg, val);
+						break;
+					case 3:
+						val = RBIOS16(index);
+						index += 2;
+						udelay(val);
+						break;
+					case 4:
+						val = RBIOS16(index);
+						index += 2;
+						udelay(val * 1000);
+						break;
+					case 6:
+						slave_addr = id & 0xff;
+						slave_addr >>= 1; /* 7 bit addressing */
+						index++;
+						reg = RBIOS8(index);
+						index++;
+						val = RBIOS8(index);
+						index++;
+						radeon_i2c_do_lock(tmds->i2c_bus, 1);
+						radeon_i2c_sw_put_byte(tmds->i2c_bus,
+								       slave_addr,
+								       reg, val);
+						radeon_i2c_do_lock(tmds->i2c_bus, 0);
+						break;
+					default:
+						DRM_ERROR("Unknown id %d\n", id >> 13);
+						break;
+					}
+					blocks--;
+				}
+				return true;
+			}
+		}
+	} else {
+		offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+		if (offset) {
+			index = offset + 10;
+			id = RBIOS16(index);
+			while (id != 0xffff) {
+				index += 2;
+				switch (id >> 13) {
+				case 0:
+					reg = (id & 0x1fff) * 4;
+					val = RBIOS32(index);
+					index += 4;
+					WREG32(reg, val);
+					break;
+				case 2:
+					reg = (id & 0x1fff) * 4;
+					and_mask = RBIOS32(index);
+					index += 4;
+					or_mask = RBIOS32(index);
+					index += 4;
+					val = RREG32(reg);
+					val = (val & and_mask) | or_mask;
+					WREG32(reg, val);
+					break;
+				case 4:
+					val = RBIOS16(index);
+					index += 2;
+					udelay(val);
+					break;
+				case 5:
+					reg = id & 0x1fff;
+					and_mask = RBIOS32(index);
+					index += 4;
+					or_mask = RBIOS32(index);
+					index += 4;
+					val = RREG32_PLL(reg);
+					val = (val & and_mask) | or_mask;
+					WREG32_PLL(reg, val);
+					break;
+				case 6:
+					reg = id & 0x1fff;
+					val = RBIOS8(index);
+					index += 1;
+					radeon_i2c_do_lock(tmds->i2c_bus, 1);
+					radeon_i2c_sw_put_byte(tmds->i2c_bus,
+							       tmds->slave_addr,
+							       reg, val);
+					radeon_i2c_do_lock(tmds->i2c_bus, 0);
+					break;
+				default:
+					DRM_ERROR("Unknown id %d\n", id >> 13);
+					break;
+				}
+				id = RBIOS16(index);
+			}
+			return true;
+		}
+	}
+	return false;
+}
+
 static void combios_parse_mmio_table(struct drm_device *dev, uint16_t offset)
 {
 	struct radeon_device *rdev = dev->dev_private;
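Aside on the DFP tables walked by radeon_combios_external_tmds_setup() above: they are tiny opcode scripts in which each entry starts with a 16-bit id whose top three bits select an action and whose low 13 bits hold a register index, followed by an opcode-dependent payload, until a 0xffff terminator. A stand-alone sketch of the same decode loop, with hypothetical rbios16()/rbios32() helpers standing in for the driver's RBIOS16/RBIOS32 accessors:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical little-endian readers modeling RBIOS16/RBIOS32. */
static uint16_t rbios16(const uint8_t *b, uint32_t i)
{
	return (uint16_t)(b[i] | (b[i + 1] << 8));
}

static uint32_t rbios32(const uint8_t *b, uint32_t i)
{
	return rbios16(b, i) | ((uint32_t)rbios16(b, i + 2) << 16);
}

/* Decode entries until the 0xffff terminator, printing the implied ops. */
static void walk_ext_tmds_table(const uint8_t *bios, uint32_t index)
{
	uint16_t id = rbios16(bios, index);

	while (id != 0xffff) {
		index += 2;
		switch (id >> 13) {
		case 0:	/* 32-bit MMIO write, register = low 13 bits * 4 */
			printf("WREG32(0x%04x, 0x%08x)\n",
			       (id & 0x1fff) * 4, rbios32(bios, index));
			index += 4;
			break;
		case 2:	/* MMIO read-modify-write: and-mask, then or-mask */
			printf("RMW32(0x%04x, &0x%08x, |0x%08x)\n",
			       (id & 0x1fff) * 4, rbios32(bios, index),
			       rbios32(bios, index + 4));
			index += 8;
			break;
		case 4:	/* delay: 16-bit count of microseconds */
			printf("udelay(%u)\n", rbios16(bios, index));
			index += 2;
			break;
		default:	/* PLL RMW and i2c-write opcodes omitted here */
			printf("unhandled opcode %u\n", id >> 13);
			return;
		}
		id = rbios16(bios, index);
	}
}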
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 29763ce..5eece18 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -40,6 +40,26 @@
 				       struct drm_encoder *encoder,
 				       bool connected);
 
+void radeon_connector_hotplug(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+	if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+
+	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+		if (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+			if (radeon_dp_needs_link_train(radeon_connector)) {
+				if (connector->encoder)
+					dp_link_train(connector->encoder, connector);
+			}
+		}
+	}
+
+}
+
 static void radeon_property_change_mode(struct drm_encoder *encoder)
 {
 	struct drm_crtc *crtc = encoder->crtc;
@@ -445,10 +465,10 @@
 		ret = connector_status_connected;
 	else {
 		if (radeon_connector->ddc_bus) {
-			radeon_i2c_do_lock(radeon_connector, 1);
+			radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
 			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
 							      &radeon_connector->ddc_bus->adapter);
-			radeon_i2c_do_lock(radeon_connector, 0);
+			radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
 			if (radeon_connector->edid)
 				ret = connector_status_connected;
 		}
@@ -553,17 +573,17 @@
 	if (!encoder)
 		ret = connector_status_disconnected;
 
-	radeon_i2c_do_lock(radeon_connector, 1);
+	radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
 	dret = radeon_ddc_probe(radeon_connector);
-	radeon_i2c_do_lock(radeon_connector, 0);
+	radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
 	if (dret) {
 		if (radeon_connector->edid) {
 			kfree(radeon_connector->edid);
 			radeon_connector->edid = NULL;
 		}
-		radeon_i2c_do_lock(radeon_connector, 1);
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
 		radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
-		radeon_i2c_do_lock(radeon_connector, 0);
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
 
 		if (!radeon_connector->edid) {
 			DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@ -708,17 +728,17 @@
 	enum drm_connector_status ret = connector_status_disconnected;
 	bool dret;
 
-	radeon_i2c_do_lock(radeon_connector, 1);
+	radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
 	dret = radeon_ddc_probe(radeon_connector);
-	radeon_i2c_do_lock(radeon_connector, 0);
+	radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
 	if (dret) {
 		if (radeon_connector->edid) {
 			kfree(radeon_connector->edid);
 			radeon_connector->edid = NULL;
 		}
-		radeon_i2c_do_lock(radeon_connector, 1);
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
 		radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
-		radeon_i2c_do_lock(radeon_connector, 0);
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
 
 		if (!radeon_connector->edid) {
 			DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@ -735,6 +755,39 @@
 				ret = connector_status_disconnected;
 			} else
 				ret = connector_status_connected;
+
+			/* Multiple connectors on the same encoder may share one
+			 * ddc line, typically an HDMI and a DVI port.  If the
+			 * EDID says the sink is HDMI, consider the HDMI port
+			 * connected and the DVI port disconnected; if it
+			 * doesn't, vice versa.
+			 */
+			if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
+				struct drm_device *dev = connector->dev;
+				struct drm_connector *list_connector;
+				struct radeon_connector *list_radeon_connector;
+				list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
+					if (connector == list_connector)
+						continue;
+					list_radeon_connector = to_radeon_connector(list_connector);
+					if (radeon_connector->devices == list_radeon_connector->devices) {
+						if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+							if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) {
+								kfree(radeon_connector->edid);
+								radeon_connector->edid = NULL;
+								ret = connector_status_disconnected;
+							}
+						} else {
+							if ((connector->connector_type == DRM_MODE_CONNECTOR_HDMIA) ||
+							    (connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)) {
+								kfree(radeon_connector->edid);
+								radeon_connector->edid = NULL;
+								ret = connector_status_disconnected;
+							}
+						}
+					}
+				}
+			}
 		}
 	}
 
@@ -863,6 +916,91 @@
 	.force = radeon_dvi_force,
 };
 
+static void radeon_dp_connector_destroy(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+
+	if (radeon_connector->ddc_bus)
+		radeon_i2c_destroy(radeon_connector->ddc_bus);
+	if (radeon_connector->edid)
+		kfree(radeon_connector->edid);
+	if (radeon_dig_connector->dp_i2c_bus)
+		radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
+	kfree(radeon_connector->con_priv);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static int radeon_dp_get_modes(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	int ret;
+
+	ret = radeon_ddc_get_modes(radeon_connector);
+	return ret;
+}
+
+static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	enum drm_connector_status ret = connector_status_disconnected;
+	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+	u8 sink_type;
+
+	if (radeon_connector->edid) {
+		kfree(radeon_connector->edid);
+		radeon_connector->edid = NULL;
+	}
+
+	sink_type = radeon_dp_getsinktype(radeon_connector);
+	if (sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+		if (radeon_dp_getdpcd(radeon_connector)) {
+			radeon_dig_connector->dp_sink_type = sink_type;
+			ret = connector_status_connected;
+		}
+	} else {
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+		if (radeon_ddc_probe(radeon_connector)) {
+			radeon_dig_connector->dp_sink_type = sink_type;
+			ret = connector_status_connected;
+		}
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+	}
+
+	return ret;
+}
+
+static int radeon_dp_mode_valid(struct drm_connector *connector,
+				  struct drm_display_mode *mode)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+
+	/* XXX check mode bandwidth */
+
+	if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+		return radeon_dp_mode_valid_helper(radeon_connector, mode);
+	else
+		return MODE_OK;
+}
+
+struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
+	.get_modes = radeon_dp_get_modes,
+	.mode_valid = radeon_dp_mode_valid,
+	.best_encoder = radeon_dvi_encoder,
+};
+
+struct drm_connector_funcs radeon_dp_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_dp_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = radeon_connector_set_property,
+	.destroy = radeon_dp_connector_destroy,
+	.force = radeon_dvi_force,
+};
+
 void
 radeon_add_atom_connector(struct drm_device *dev,
 			  uint32_t connector_id,
@@ -871,7 +1009,8 @@
 			  struct radeon_i2c_bus_rec *i2c_bus,
 			  bool linkb,
 			  uint32_t igp_lane_info,
-			  uint16_t connector_object_id)
+			  uint16_t connector_object_id,
+			  struct radeon_hpd *hpd)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_connector *connector;
@@ -911,6 +1050,7 @@
 	radeon_connector->devices = supported_device;
 	radeon_connector->shared_ddc = shared_ddc;
 	radeon_connector->connector_object_id = connector_object_id;
+	radeon_connector->hpd = *hpd;
 	switch (connector_type) {
 	case DRM_MODE_CONNECTOR_VGA:
 		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -963,10 +1103,12 @@
 		drm_connector_attach_property(&radeon_connector->base,
 					      rdev->mode_info.coherent_mode_property,
 					      1);
-		radeon_connector->dac_load_detect = true;
-		drm_connector_attach_property(&radeon_connector->base,
-					      rdev->mode_info.load_detect_property,
-					      1);
+		if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+			radeon_connector->dac_load_detect = true;
+			drm_connector_attach_property(&radeon_connector->base,
+						      rdev->mode_info.load_detect_property,
+						      1);
+		}
 		break;
 	case DRM_MODE_CONNECTOR_HDMIA:
 	case DRM_MODE_CONNECTOR_HDMIB:
@@ -997,16 +1139,23 @@
 		radeon_dig_connector->linkb = linkb;
 		radeon_dig_connector->igp_lane_info = igp_lane_info;
 		radeon_connector->con_priv = radeon_dig_connector;
-		drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
-		ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+		drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
+		ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
 		if (ret)
 			goto failed;
 		if (i2c_bus->valid) {
+			/* add DP i2c bus */
+			radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
+			if (!radeon_dig_connector->dp_i2c_bus)
+				goto failed;
 			radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
 			if (!radeon_connector->ddc_bus)
 				goto failed;
 		}
 		subpixel_order = SubPixelHorizontalRGB;
+		drm_connector_attach_property(&radeon_connector->base,
+					      rdev->mode_info.coherent_mode_property,
+					      1);
 		break;
 	case DRM_MODE_CONNECTOR_SVIDEO:
 	case DRM_MODE_CONNECTOR_Composite:
@@ -1020,6 +1169,9 @@
 			drm_connector_attach_property(&radeon_connector->base,
 						      rdev->mode_info.load_detect_property,
 						      1);
+			drm_connector_attach_property(&radeon_connector->base,
+						      rdev->mode_info.tv_std_property,
+						      1);
 		}
 		break;
 	case DRM_MODE_CONNECTOR_LVDS:
@@ -1038,7 +1190,6 @@
 			if (!radeon_connector->ddc_bus)
 				goto failed;
 		}
-		drm_mode_create_scaling_mode_property(dev);
 		drm_connector_attach_property(&radeon_connector->base,
 					      dev->mode_config.scaling_mode_property,
 					      DRM_MODE_SCALE_FULLSCREEN);
@@ -1063,7 +1214,8 @@
 			    uint32_t supported_device,
 			    int connector_type,
 			    struct radeon_i2c_bus_rec *i2c_bus,
-			    uint16_t connector_object_id)
+			    uint16_t connector_object_id,
+			    struct radeon_hpd *hpd)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_connector *connector;
@@ -1093,6 +1245,7 @@
 	radeon_connector->connector_id = connector_id;
 	radeon_connector->devices = supported_device;
 	radeon_connector->connector_object_id = connector_object_id;
+	radeon_connector->hpd = *hpd;
 	switch (connector_type) {
 	case DRM_MODE_CONNECTOR_VGA:
 		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -1160,6 +1313,9 @@
 			drm_connector_attach_property(&radeon_connector->base,
 						      rdev->mode_info.load_detect_property,
 						      1);
+			drm_connector_attach_property(&radeon_connector->base,
+						      rdev->mode_info.tv_std_property,
+						      1);
 		}
 		break;
 	case DRM_MODE_CONNECTOR_LVDS:
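The shared-DDC handling added to radeon_dvi_detect() above reduces to a small decision rule; restated as a stand-alone predicate (connector types abbreviated, edid_is_hdmi standing in for drm_detect_hdmi_monitor()):

#include <stdbool.h>

enum ctype { DVID, HDMIA, HDMIB };

/* True when this connector should drop its EDID and report disconnected
 * because the sibling sharing the ddc line better matches the sink. */
static bool yield_to_sibling(enum ctype self, bool edid_is_hdmi)
{
	if (edid_is_hdmi)
		return self == DVID;		/* HDMI sink: the DVI port yields */
	return self == HDMIA || self == HDMIB;	/* DVI sink: the HDMI port yields */
}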
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 4f7afc7..0b2f9c2 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -1941,8 +1941,8 @@
 	for (t = 0; t < dev_priv->usec_timeout; t++) {
 		u32 done_age = GET_SCRATCH(dev_priv, 1);
 		DRM_DEBUG("done_age = %d\n", done_age);
-		for (i = start; i < dma->buf_count; i++) {
-			buf = dma->buflist[i];
+		for (i = 0; i < dma->buf_count; i++) {
+			buf = dma->buflist[start];
 			buf_priv = buf->dev_private;
 			if (buf->file_priv == NULL || (buf->pending &&
 						       buf_priv->age <=
@@ -1951,7 +1951,8 @@
 				buf->pending = 0;
 				return buf;
 			}
-			start = 0;
+			if (++start >= dma->buf_count)
+				start = 0;
 		}
 
 		if (t) {
@@ -1960,47 +1961,9 @@
 		}
 	}
 
-	DRM_DEBUG("returning NULL!\n");
 	return NULL;
 }
 
-#if 0
-struct drm_buf *radeon_freelist_get(struct drm_device * dev)
-{
-	struct drm_device_dma *dma = dev->dma;
-	drm_radeon_private_t *dev_priv = dev->dev_private;
-	drm_radeon_buf_priv_t *buf_priv;
-	struct drm_buf *buf;
-	int i, t;
-	int start;
-	u32 done_age;
-
-	done_age = radeon_read_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1));
-	if (++dev_priv->last_buf >= dma->buf_count)
-		dev_priv->last_buf = 0;
-
-	start = dev_priv->last_buf;
-	dev_priv->stats.freelist_loops++;
-
-	for (t = 0; t < 2; t++) {
-		for (i = start; i < dma->buf_count; i++) {
-			buf = dma->buflist[i];
-			buf_priv = buf->dev_private;
-			if (buf->file_priv == 0 || (buf->pending &&
-						    buf_priv->age <=
-						    done_age)) {
-				dev_priv->stats.requested_bufs++;
-				buf->pending = 0;
-				return buf;
-			}
-		}
-		start = 0;
-	}
-
-	return NULL;
-}
-#endif
-
 void radeon_freelist_reset(struct drm_device * dev)
 {
 	struct drm_device_dma *dma = dev->dma;
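On the radeon_freelist_get() fix just above: the old loop indexed buflist[i] from start to buf_count and zeroed start inside the body, so within one pass it never examined the slots before start. The rewrite makes exactly buf_count probes per pass, starting at start and wrapping. A stand-alone model of that scan (pred() stands in for the file_priv/age test):

/* Round-robin probe of n slots beginning at *start, wrapping once, as in
 * the fixed loop above; returns the first slot that pred() accepts. */
static int probe_all_slots(int n, int *start, int (*pred)(int slot))
{
	for (int i = 0; i < n; i++) {
		if (pred(*start))
			return *start;	/* found; *start stays on this slot */
		if (++(*start) >= n)
			*start = 0;
	}
	return -1;			/* no slot free in this pass */
}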
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 5ab2cf9..65590a0 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -76,17 +76,17 @@
 			}
 			p->relocs_ptr[i] = &p->relocs[i];
 			p->relocs[i].robj = p->relocs[i].gobj->driver_private;
-			p->relocs[i].lobj.robj = p->relocs[i].robj;
+			p->relocs[i].lobj.bo = p->relocs[i].robj;
 			p->relocs[i].lobj.rdomain = r->read_domains;
 			p->relocs[i].lobj.wdomain = r->write_domain;
 			p->relocs[i].handle = r->handle;
 			p->relocs[i].flags = r->flags;
 			INIT_LIST_HEAD(&p->relocs[i].lobj.list);
-			radeon_object_list_add_object(&p->relocs[i].lobj,
-						      &p->validated);
+			radeon_bo_list_add_object(&p->relocs[i].lobj,
+						&p->validated);
 		}
 	}
-	return radeon_object_list_validate(&p->validated, p->ib->fence);
+	return radeon_bo_list_validate(&p->validated, p->ib->fence);
 }
 
 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -190,9 +190,10 @@
 	unsigned i;
 
 	if (error) {
-		radeon_object_list_unvalidate(&parser->validated);
+		radeon_bo_list_unvalidate(&parser->validated,
+						parser->ib->fence);
 	} else {
-		radeon_object_list_clean(&parser->validated);
+		radeon_bo_list_unreserve(&parser->validated);
 	}
 	for (i = 0; i < parser->nrelocs; i++) {
 		if (parser->relocs[i].gobj) {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 41bb76f..02bcdb1 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -44,10 +44,11 @@
 	if (rdev->family < CHIP_R600) {
 		int i;
 
-		for (i = 0; i < 8; i++) {
-			WREG32(RADEON_SURFACE0_INFO +
-			       i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
-			       0);
+		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
+			if (rdev->surface_regs[i].bo)
+				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
+			else
+				radeon_clear_surface_reg(rdev, i);
 		}
 		/* enable surfaces */
 		WREG32(RADEON_SURFACE_CNTL, 0);
@@ -208,6 +209,24 @@
 
 }
 
+bool radeon_boot_test_post_card(struct radeon_device *rdev)
+{
+	if (radeon_card_posted(rdev))
+		return true;
+
+	if (rdev->bios) {
+		DRM_INFO("GPU not posted. posting now...\n");
+		if (rdev->is_atom_bios)
+			atom_asic_init(rdev->mode_info.atom_context);
+		else
+			radeon_combios_asic_init(rdev->ddev);
+		return true;
+	} else {
+		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+		return false;
+	}
+}
+
 int radeon_dummy_page_init(struct radeon_device *rdev)
 {
 	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
@@ -463,12 +482,16 @@
 
 	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
 	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
+	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
 	return 0;
 }
 
 void radeon_atombios_fini(struct radeon_device *rdev)
 {
-	kfree(rdev->mode_info.atom_context);
+	if (rdev->mode_info.atom_context) {
+		kfree(rdev->mode_info.atom_context->scratch);
+		kfree(rdev->mode_info.atom_context);
+	}
 	kfree(rdev->mode_info.atom_card_info);
 }
 
@@ -544,16 +567,24 @@
 	mutex_init(&rdev->cs_mutex);
 	mutex_init(&rdev->ib_pool.mutex);
 	mutex_init(&rdev->cp.mutex);
+	if (rdev->family >= CHIP_R600)
+		spin_lock_init(&rdev->ih.lock);
+	mutex_init(&rdev->gem.mutex);
 	rwlock_init(&rdev->fence_drv.lock);
 	INIT_LIST_HEAD(&rdev->gem.objects);
 
+	/* setup workqueue */
+	rdev->wq = create_workqueue("radeon");
+	if (rdev->wq == NULL)
+		return -ENOMEM;
+
 	/* Set asic functions */
 	r = radeon_asic_init(rdev);
 	if (r) {
 		return r;
 	}
 
-	if (radeon_agpmode == -1) {
+	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
 		radeon_agp_disable(rdev);
 	}
 
@@ -620,6 +651,7 @@
 	DRM_INFO("radeon: finishing device.\n");
 	rdev->shutdown = true;
 	radeon_fini(rdev);
+	destroy_workqueue(rdev->wq);
 	vga_client_register(rdev->pdev, NULL, NULL, NULL);
 	iounmap(rdev->rmmio);
 	rdev->rmmio = NULL;
@@ -633,6 +665,7 @@
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_crtc *crtc;
+	int r;
 
 	if (dev == NULL || rdev == NULL) {
 		return -ENODEV;
@@ -643,26 +676,31 @@
 	/* unpin the front buffers */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
-		struct radeon_object *robj;
+		struct radeon_bo *robj;
 
 		if (rfb == NULL || rfb->obj == NULL) {
 			continue;
 		}
 		robj = rfb->obj->driver_private;
-		if (robj != rdev->fbdev_robj) {
-			radeon_object_unpin(robj);
+		if (robj != rdev->fbdev_rbo) {
+			r = radeon_bo_reserve(robj, false);
+			if (unlikely(r == 0)) {
+				radeon_bo_unpin(robj);
+				radeon_bo_unreserve(robj);
+			}
 		}
 	}
 	/* evict vram memory */
-	radeon_object_evict_vram(rdev);
+	radeon_bo_evict_vram(rdev);
 	/* wait for gpu to finish processing current batch */
 	radeon_fence_wait_last(rdev);
 
 	radeon_save_bios_scratch_regs(rdev);
 
 	radeon_suspend(rdev);
+	radeon_hpd_fini(rdev);
 	/* evict remaining vram memory */
-	radeon_object_evict_vram(rdev);
+	radeon_bo_evict_vram(rdev);
 
 	pci_save_state(dev->pdev);
 	if (state.event == PM_EVENT_SUSPEND) {
@@ -695,6 +733,8 @@
 	fb_set_suspend(rdev->fbdev_info, 0);
 	release_console_sem();
 
+	/* reset hpd state */
+	radeon_hpd_init(rdev);
 	/* blat the mode back in */
 	drm_helper_resume_force_mode(dev);
 	return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index c85df4a..a133b83 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -250,6 +250,16 @@
 	"HDMI-B",
 };
 
+static const char *hpd_names[7] = {
+	"NONE",
+	"HPD1",
+	"HPD2",
+	"HPD3",
+	"HPD4",
+	"HPD5",
+	"HPD6",
+};
+
 static void radeon_print_display_setup(struct drm_device *dev)
 {
 	struct drm_connector *connector;
@@ -264,16 +274,18 @@
 		radeon_connector = to_radeon_connector(connector);
 		DRM_INFO("Connector %d:\n", i);
 		DRM_INFO("  %s\n", connector_names[connector->connector_type]);
+		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+			DRM_INFO("  %s\n", hpd_names[radeon_connector->hpd.hpd]);
 		if (radeon_connector->ddc_bus)
 			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
 				 radeon_connector->ddc_bus->rec.mask_clk_reg,
 				 radeon_connector->ddc_bus->rec.mask_data_reg,
 				 radeon_connector->ddc_bus->rec.a_clk_reg,
 				 radeon_connector->ddc_bus->rec.a_data_reg,
-				 radeon_connector->ddc_bus->rec.put_clk_reg,
-				 radeon_connector->ddc_bus->rec.put_data_reg,
-				 radeon_connector->ddc_bus->rec.get_clk_reg,
-				 radeon_connector->ddc_bus->rec.get_data_reg);
+				 radeon_connector->ddc_bus->rec.en_clk_reg,
+				 radeon_connector->ddc_bus->rec.en_data_reg,
+				 radeon_connector->ddc_bus->rec.y_clk_reg,
+				 radeon_connector->ddc_bus->rec.y_data_reg);
 		DRM_INFO("  Encoders:\n");
 		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 			radeon_encoder = to_radeon_encoder(encoder);
@@ -324,6 +336,7 @@
 			ret = radeon_get_legacy_connector_info_from_table(dev);
 	}
 	if (ret) {
+		radeon_setup_encoder_clones(dev);
 		radeon_print_display_setup(dev);
 		list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
 			radeon_ddc_dump(drm_connector);
@@ -336,12 +349,17 @@
 {
 	int ret = 0;
 
+	if (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+		struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+		if (dig->dp_i2c_bus)
+			radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
+	}
 	if (!radeon_connector->ddc_bus)
 		return -1;
 	if (!radeon_connector->edid) {
-		radeon_i2c_do_lock(radeon_connector, 1);
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
 		radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
-		radeon_i2c_do_lock(radeon_connector, 0);
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
 	}
 
 	if (radeon_connector->edid) {
@@ -361,9 +379,9 @@
 
 	if (!radeon_connector->ddc_bus)
 		return -1;
-	radeon_i2c_do_lock(radeon_connector, 1);
+	radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
 	edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
-	radeon_i2c_do_lock(radeon_connector, 0);
+	radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
 	if (edid) {
 		kfree(edid);
 	}
@@ -542,6 +560,98 @@
 	*post_div_p = best_post_div;
 }
 
+void radeon_compute_pll_avivo(struct radeon_pll *pll,
+			      uint64_t freq,
+			      uint32_t *dot_clock_p,
+			      uint32_t *fb_div_p,
+			      uint32_t *frac_fb_div_p,
+			      uint32_t *ref_div_p,
+			      uint32_t *post_div_p,
+			      int flags)
+{
+	fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
+	fixed20_12 pll_out_max, pll_out_min;
+	fixed20_12 pll_in_max, pll_in_min;
+	fixed20_12 reference_freq;
+	fixed20_12 error, ffreq, a, b;
+
+	pll_out_max.full = rfixed_const(pll->pll_out_max);
+	pll_out_min.full = rfixed_const(pll->pll_out_min);
+	pll_in_max.full = rfixed_const(pll->pll_in_max);
+	pll_in_min.full = rfixed_const(pll->pll_in_min);
+	reference_freq.full = rfixed_const(pll->reference_freq);
+	do_div(freq, 10);
+	ffreq.full = rfixed_const(freq);
+	error.full = rfixed_const(100 * 100);
+
+	/* max p */
+	p.full = rfixed_div(pll_out_max, ffreq);
+	p.full = rfixed_floor(p);
+
+	/* min m */
+	m.full = rfixed_div(reference_freq, pll_in_max);
+	m.full = rfixed_ceil(m);
+
+	while (1) {
+		n.full = rfixed_div(ffreq, reference_freq);
+		n.full = rfixed_mul(n, m);
+		n.full = rfixed_mul(n, p);
+
+		f_vco.full = rfixed_div(n, m);
+		f_vco.full = rfixed_mul(f_vco, reference_freq);
+
+		f_pclk.full = rfixed_div(f_vco, p);
+
+		if (f_pclk.full > ffreq.full)
+			error.full = f_pclk.full - ffreq.full;
+		else
+			error.full = ffreq.full - f_pclk.full;
+		error.full = rfixed_div(error, f_pclk);
+		a.full = rfixed_const(100 * 100);
+		error.full = rfixed_mul(error, a);
+
+		a.full = rfixed_mul(m, p);
+		a.full = rfixed_div(n, a);
+		best_freq.full = rfixed_mul(reference_freq, a);
+
+		if (rfixed_trunc(error) < 25)
+			break;
+
+		a.full = rfixed_const(1);
+		m.full = m.full + a.full;
+		a.full = rfixed_div(reference_freq, m);
+		if (a.full >= pll_in_min.full)
+			continue;
+
+		m.full = rfixed_div(reference_freq, pll_in_max);
+		m.full = rfixed_ceil(m);
+		a.full = rfixed_const(1);
+		p.full = p.full - a.full;
+		a.full = rfixed_mul(p, ffreq);
+		if (a.full >= pll_out_min.full)
+			continue;
+		else {
+			DRM_ERROR("Unable to find pll dividers\n");
+			break;
+		}
+	}
+
+	a.full = rfixed_const(10);
+	b.full = rfixed_mul(n, a);
+
+	frac_n.full = rfixed_floor(n);
+	frac_n.full = rfixed_mul(frac_n, a);
+	frac_n.full = b.full - frac_n.full;
+
+	*dot_clock_p = rfixed_trunc(best_freq);
+	*fb_div_p = rfixed_trunc(n);
+	*frac_fb_div_p = rfixed_trunc(frac_n);
+	*ref_div_p = rfixed_trunc(m);
+	*post_div_p = rfixed_trunc(p);
+
+	DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
+}
+
 static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
 	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
@@ -642,7 +752,7 @@
 			return -ENOMEM;
 
 		rdev->mode_info.coherent_mode_property->values[0] = 0;
-		rdev->mode_info.coherent_mode_property->values[0] = 1;
+		rdev->mode_info.coherent_mode_property->values[1] = 1;
 	}
 
 	if (!ASIC_IS_AVIVO(rdev)) {
@@ -666,7 +776,7 @@
 	if (!rdev->mode_info.load_detect_property)
 		return -ENOMEM;
 	rdev->mode_info.load_detect_property->values[0] = 0;
-	rdev->mode_info.load_detect_property->values[0] = 1;
+	rdev->mode_info.load_detect_property->values[1] = 1;
 
 	drm_mode_create_scaling_mode_property(rdev->ddev);
 
@@ -723,6 +833,8 @@
 	if (!ret) {
 		return ret;
 	}
+	/* initialize hpd */
+	radeon_hpd_init(rdev);
 	drm_helper_initial_config(rdev->ddev);
 	return 0;
 }
@@ -730,6 +842,7 @@
 void radeon_modeset_fini(struct radeon_device *rdev)
 {
 	if (rdev->mode_info.mode_config_initialized) {
+		radeon_hpd_fini(rdev);
 		drm_mode_config_cleanup(rdev->ddev);
 		rdev->mode_info.mode_config_initialized = false;
 	}
@@ -750,9 +863,17 @@
 		if (encoder->crtc != crtc)
 			continue;
 		if (first) {
-			radeon_crtc->rmx_type = radeon_encoder->rmx_type;
+			/* set scaling */
+			if (radeon_encoder->rmx_type == RMX_OFF)
+				radeon_crtc->rmx_type = RMX_OFF;
+			else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay ||
+				 mode->vdisplay < radeon_encoder->native_mode.vdisplay)
+				radeon_crtc->rmx_type = radeon_encoder->rmx_type;
+			else
+				radeon_crtc->rmx_type = RMX_OFF;
+			/* copy native mode */
 			memcpy(&radeon_crtc->native_mode,
-				&radeon_encoder->native_mode,
+			       &radeon_encoder->native_mode,
 				sizeof(struct drm_display_mode));
 			first = false;
 		} else {
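radeon_compute_pll_avivo() above works entirely in the driver's 20.12 fixed-point type fixed20_12: the top 20 bits are the integer part and the low 12 bits the fraction, so rfixed_const(x) is x << 12 and rfixed_trunc(x) is x >> 12 (see the radeon_fixed.h hunk later in this series). A minimal stand-alone model of the convergence test in that loop, with the fixed-point helpers re-derived under that assumption and illustrative clock numbers:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t full; } fx20_12;	/* models fixed20_12 */

static fx20_12 fx_const(uint32_t a)  { return (fx20_12){ a << 12 }; }
static uint32_t fx_trunc(fx20_12 a)  { return a.full >> 12; }

static fx20_12 fx_div(fx20_12 a, fx20_12 b)
{
	return (fx20_12){ (uint32_t)(((uint64_t)a.full << 12) / b.full) };
}

static fx20_12 fx_mul(fx20_12 a, fx20_12 b)
{
	return (fx20_12){ (uint32_t)(((uint64_t)a.full * b.full) >> 12) };
}

int main(void)
{
	/* error = |f_pclk - ffreq| / f_pclk, scaled by 100 * 100 as in the
	 * loop above; the search stops once the error falls below 25,
	 * i.e. the candidate pixel clock is within 0.25% of the target. */
	fx20_12 f_pclk = fx_const(15437), ffreq = fx_const(15400);
	fx20_12 error  = { f_pclk.full - ffreq.full };

	error = fx_mul(fx_div(error, f_pclk), fx_const(100 * 100));
	printf("error = %u (converges when < 25)\n", fx_trunc(error));
	return 0;
}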
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 7f50fb8..c5c45e6 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -86,6 +86,7 @@
 int radeon_testing = 0;
 int radeon_connector_table = 0;
 int radeon_tv = 1;
+int radeon_new_pll = 1;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -120,6 +121,9 @@
 MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
 module_param_named(tv, radeon_tv, int, 0444);
 
+MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips");
+module_param_named(new_pll, radeon_new_pll, int, 0444);
+
 static int radeon_suspend(struct drm_device *dev, pm_message_t state)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
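One usage note on the parameter block above: new_pll has permissions 0444, so it is readable but not switchable at runtime; selecting the old divider search means loading with "modprobe radeon new_pll=0" (or "radeon.new_pll=0" on the kernel command line for built-in drivers).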
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 350962e..e137852 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -1104,7 +1104,6 @@
 #       define R600_IT_WAIT_REG_MEM             0x00003C00
 #       define R600_IT_MEM_WRITE                0x00003D00
 #       define R600_IT_INDIRECT_BUFFER          0x00003200
-#       define R600_IT_CP_INTERRUPT             0x00004000
 #       define R600_IT_SURFACE_SYNC             0x00004300
 #              define R600_CB0_DEST_BASE_ENA    (1 << 6)
 #              define R600_TC_ACTION_ENA        (1 << 23)
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index d42bc51..b4f23ec 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -35,6 +35,51 @@
 bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
 				struct drm_display_mode *mode);
 
+static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_encoder *clone_encoder;
+	uint32_t index_mask = 0;
+	int count;
+
+	/* DIG routing gets problematic */
+	if (rdev->family >= CHIP_R600)
+		return index_mask;
+	/* LVDS/TV are too wacky */
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+		return index_mask;
+	/* DVO requires 2x ppll clocks depending on tmds chip */
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT)
+		return index_mask;
+
+	count = -1;
+	list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) {
+		struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder);
+		count++;
+
+		if (clone_encoder == encoder)
+			continue;
+		if (radeon_clone->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			continue;
+		if (radeon_clone->devices & ATOM_DEVICE_DFP2_SUPPORT)
+			continue;
+		else
+			index_mask |= (1 << count);
+	}
+	return index_mask;
+}
+
+void radeon_setup_encoder_clones(struct drm_device *dev)
+{
+	struct drm_encoder *encoder;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		encoder->possible_clones = radeon_encoder_clones(encoder);
+	}
+}
+
 uint32_t
 radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
 {
@@ -163,29 +208,6 @@
 	return NULL;
 }
 
-/* used for both atom and legacy */
-void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
-			   struct drm_display_mode *mode,
-			   struct drm_display_mode *adjusted_mode)
-{
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
-
-	if (mode->hdisplay < native_mode->hdisplay ||
-	    mode->vdisplay < native_mode->vdisplay) {
-		int mode_id = adjusted_mode->base.id;
-		*adjusted_mode = *native_mode;
-		if (!ASIC_IS_AVIVO(rdev)) {
-			adjusted_mode->hdisplay = mode->hdisplay;
-			adjusted_mode->vdisplay = mode->vdisplay;
-		}
-		adjusted_mode->base.id = mode_id;
-	}
-}
-
-
 static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
 				   struct drm_display_mode *mode,
 				   struct drm_display_mode *adjusted_mode)
@@ -198,14 +220,24 @@
 	radeon_encoder_set_active_device(encoder);
 	drm_mode_set_crtcinfo(adjusted_mode, 0);
 
-	if (radeon_encoder->rmx_type != RMX_OFF)
-		radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
-
 	/* hw bug */
 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
 	    && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
 		adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
 
+	/* get the native mode for LVDS */
+	if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
+		struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+		int mode_id = adjusted_mode->base.id;
+		*adjusted_mode = *native_mode;
+		if (!ASIC_IS_AVIVO(rdev)) {
+			adjusted_mode->hdisplay = mode->hdisplay;
+			adjusted_mode->vdisplay = mode->vdisplay;
+		}
+		adjusted_mode->base.id = mode_id;
+	}
+
+	/* get the native mode for TV */
 	if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
 		struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
 		if (tv_dac) {
@@ -218,6 +250,12 @@
 		}
 	}
 
+	if (ASIC_IS_DCE3(rdev) &&
+	    (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT))) {
+		struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+		radeon_dp_set_link_config(connector, mode);
+	}
+
 	return true;
 }
 
@@ -392,7 +430,7 @@
 	LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
 };
 
-static void
+void
 atombios_digital_setup(struct drm_encoder *encoder, int action)
 {
 	struct drm_device *dev = encoder->dev;
@@ -522,6 +560,7 @@
 {
 	struct drm_connector *connector;
 	struct radeon_connector *radeon_connector;
+	struct radeon_connector_atom_dig *radeon_dig_connector;
 
 	connector = radeon_get_connector_for_encoder(encoder);
 	if (!connector)
@@ -551,10 +590,10 @@
 		return ATOM_ENCODER_MODE_LVDS;
 		break;
 	case DRM_MODE_CONNECTOR_DisplayPort:
-		/*if (radeon_output->MonType == MT_DP)
-		  return ATOM_ENCODER_MODE_DP;
-		  else*/
-		if (drm_detect_hdmi_monitor(radeon_connector->edid))
+		radeon_dig_connector = radeon_connector->con_priv;
+		if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+			return ATOM_ENCODER_MODE_DP;
+		else if (drm_detect_hdmi_monitor(radeon_connector->edid))
 			return ATOM_ENCODER_MODE_HDMI;
 		else
 			return ATOM_ENCODER_MODE_DVI;
@@ -573,6 +612,30 @@
 	}
 }
 
+/*
+ * DIG Encoder/Transmitter Setup
+ *
+ * DCE 3.0/3.1
+ * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA.
+ * Supports up to 3 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1 can drive UNIPHY link A or link B
+ * DIG2 can drive UNIPHY link B or LVTMA
+ *
+ * DCE 3.2
+ * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B).
+ * Supports up to 5 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B
+ *
+ * Routing
+ * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
+ * Examples:
+ * crtc0 -> dig2 -> LVTMA   links A+B -> TMDS/HDMI
+ * crtc1 -> dig1 -> UNIPHY0 link  B   -> DP
+ * crtc0 -> dig1 -> UNIPHY2 link  A   -> LVDS
+ * crtc1 -> dig2 -> UNIPHY1 link  B+A -> TMDS/HDMI
+ */
 static void
 atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
 {
@@ -614,10 +677,17 @@
 	} else {
 		switch (radeon_encoder->encoder_id) {
 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-			index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+			/* XXX doesn't really matter which dig encoder we pick as long as it's
+			 * not already in use
+			 */
+			if (dig_connector->linkb)
+				index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
+			else
+				index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
 			num = 1;
 			break;
 		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+			/* Only dig2 encoder can drive LVTMA */
 			index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
 			num = 2;
 			break;
@@ -652,19 +722,22 @@
 		}
 	}
 
-	if (radeon_encoder->pixel_clock > 165000) {
-		args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA_B;
-		args.ucLaneNum = 8;
-	} else {
-		if (dig_connector->linkb)
-			args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
-		else
-			args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
-		args.ucLaneNum = 4;
-	}
-
 	args.ucEncoderMode = atombios_get_encoder_mode(encoder);
 
+	if (args.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
+		if (dig_connector->dp_clock == 270000)
+			args.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+		args.ucLaneNum = dig_connector->dp_lane_count;
+	} else if (radeon_encoder->pixel_clock > 165000)
+		args.ucLaneNum = 8;
+	else
+		args.ucLaneNum = 4;
+
+	if (dig_connector->linkb)
+		args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
+	else
+		args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
+
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
 }
@@ -674,8 +747,8 @@
 	DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
 };
 
-static void
-atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
+void
+atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
 {
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
@@ -687,6 +760,7 @@
 	struct drm_connector *connector;
 	struct radeon_connector *radeon_connector;
 	struct radeon_connector_atom_dig *dig_connector;
+	bool is_dp = false;
 
 	connector = radeon_get_connector_for_encoder(encoder);
 	if (!connector)
@@ -704,6 +778,9 @@
 
 	dig_connector = radeon_connector->con_priv;
 
+	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
+		is_dp = true;
+
 	memset(&args, 0, sizeof(args));
 
 	if (ASIC_IS_DCE32(rdev))
@@ -724,17 +801,23 @@
 	args.v1.ucAction = action;
 	if (action == ATOM_TRANSMITTER_ACTION_INIT) {
 		args.v1.usInitInfo = radeon_connector->connector_object_id;
+	} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+		args.v1.asMode.ucLaneSel = lane_num;
+		args.v1.asMode.ucLaneSet = lane_set;
 	} else {
-		if (radeon_encoder->pixel_clock > 165000)
+		if (is_dp)
+			args.v1.usPixelClock =
+				cpu_to_le16(dig_connector->dp_clock / 10);
+		else if (radeon_encoder->pixel_clock > 165000)
 			args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
 		else
 			args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
 	}
 	if (ASIC_IS_DCE32(rdev)) {
-		if (radeon_encoder->pixel_clock > 165000)
-			args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
 		if (dig->dig_block)
 			args.v2.acConfig.ucEncoderSel = 1;
+		if (dig_connector->linkb)
+			args.v2.acConfig.ucLinkSel = 1;
 
 		switch (radeon_encoder->encoder_id) {
 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
@@ -751,7 +834,9 @@
 			break;
 		}
 
-		if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+		if (is_dp)
+			args.v2.acConfig.fCoherentMode = 1;
+		else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
 			if (dig->coherent_mode)
 				args.v2.acConfig.fCoherentMode = 1;
 		}
@@ -760,17 +845,20 @@
 
 		switch (radeon_encoder->encoder_id) {
 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
+			/* XXX doesn't really matter which dig encoder we pick as long as it's
+			 * not already in use
+			 */
+			if (dig_connector->linkb)
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
+			else
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
 			if (rdev->flags & RADEON_IS_IGP) {
 				if (radeon_encoder->pixel_clock > 165000) {
-					args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
-							     ATOM_TRANSMITTER_CONFIG_LINKA_B);
 					if (dig_connector->igp_lane_info & 0x3)
 						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
 					else if (dig_connector->igp_lane_info & 0xc)
 						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
 				} else {
-					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
 					if (dig_connector->igp_lane_info & 0x1)
 						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
 					else if (dig_connector->igp_lane_info & 0x2)
@@ -780,35 +868,25 @@
 					else if (dig_connector->igp_lane_info & 0x8)
 						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
 				}
-			} else {
-				if (radeon_encoder->pixel_clock > 165000)
-					args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
-							     ATOM_TRANSMITTER_CONFIG_LINKA_B |
-							     ATOM_TRANSMITTER_CONFIG_LANE_0_7);
-				else {
-					if (dig_connector->linkb)
-						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
-					else
-						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
-				}
 			}
 			break;
 		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+			/* Only dig2 encoder can drive LVTMA */
 			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
-			if (radeon_encoder->pixel_clock > 165000)
-				args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
-						     ATOM_TRANSMITTER_CONFIG_LINKA_B |
-						     ATOM_TRANSMITTER_CONFIG_LANE_0_7);
-			else {
-				if (dig_connector->linkb)
-					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
-				else
-					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
-			}
 			break;
 		}
 
-		if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+		if (radeon_encoder->pixel_clock > 165000)
+			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
+
+		if (dig_connector->linkb)
+			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
+		else
+			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
+
+		if (is_dp)
+			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+		else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
 			if (dig->coherent_mode)
 				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
 		}
@@ -918,12 +996,16 @@
 	if (is_dig) {
 		switch (mode) {
 		case DRM_MODE_DPMS_ON:
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+			{
+				struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+				dp_link_train(encoder, connector);
+			}
 			break;
 		case DRM_MODE_DPMS_STANDBY:
 		case DRM_MODE_DPMS_SUSPEND:
 		case DRM_MODE_DPMS_OFF:
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE);
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
 			break;
 		}
 	} else {
@@ -1025,13 +1107,33 @@
 						args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
 					else
 						args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
-				} else
-					args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+				} else {
+					struct drm_connector *connector;
+					struct radeon_connector *radeon_connector;
+					struct radeon_connector_atom_dig *dig_connector;
+
+					connector = radeon_get_connector_for_encoder(encoder);
+					if (!connector)
+						return;
+					radeon_connector = to_radeon_connector(connector);
+					if (!radeon_connector->con_priv)
+						return;
+					dig_connector = radeon_connector->con_priv;
+
+					/* XXX doesn't really matter which dig encoder we pick as long as it's
+					 * not already in use
+					 */
+					if (dig_connector->linkb)
+						args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+					else
+						args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+				}
 				break;
 			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
 				args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
 				break;
 			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+				/* Only dig2 encoder can drive LVTMA */
 				args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
 				break;
 			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
@@ -1104,11 +1206,14 @@
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
 
-	if (radeon_encoder->enc_priv) {
-		struct radeon_encoder_atom_dig *dig;
+	if (radeon_encoder->active_device &
+	    (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
+		if (radeon_encoder->enc_priv) {
+			struct radeon_encoder_atom_dig *dig;
 
-		dig = radeon_encoder->enc_priv;
-		dig->dig_block = radeon_crtc->crtc_id;
+			dig = radeon_encoder->enc_priv;
+			dig->dig_block = radeon_crtc->crtc_id;
+		}
 	}
 	radeon_encoder->pixel_clock = adjusted_mode->clock;
 
@@ -1134,14 +1239,14 @@
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
 		/* disable the encoder and transmitter */
-		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE);
+		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
 		atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
 
 		/* setup and enable the encoder and transmitter */
 		atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
-		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT);
-		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP);
-		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
+		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
+		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
+		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
 		break;
 	case ENCODER_OBJECT_ID_INTERNAL_DDI:
 		atombios_ddia_setup(encoder, ATOM_ENABLE);
@@ -1354,7 +1459,6 @@
 		encoder->possible_crtcs = 0x1;
 	else
 		encoder->possible_crtcs = 0x3;
-	encoder->possible_clones = 0;
 
 	radeon_encoder->enc_priv = NULL;
 
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index d10eb43..3ba213d 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -140,7 +140,7 @@
 	struct radeon_framebuffer *rfb;
 	struct drm_mode_fb_cmd mode_cmd;
 	struct drm_gem_object *gobj = NULL;
-	struct radeon_object *robj = NULL;
+	struct radeon_bo *rbo = NULL;
 	struct device *device = &rdev->pdev->dev;
 	int size, aligned_size, ret;
 	u64 fb_gpuaddr;
@@ -168,14 +168,14 @@
 	ret = radeon_gem_object_create(rdev, aligned_size, 0,
 			RADEON_GEM_DOMAIN_VRAM,
 			false, ttm_bo_type_kernel,
-			false, &gobj);
+			&gobj);
 	if (ret) {
 		printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
 		       surface_width, surface_height);
 		ret = -ENOMEM;
 		goto out;
 	}
-	robj = gobj->driver_private;
+	rbo = gobj->driver_private;
 
 	if (fb_tiled)
 		tiling_flags = RADEON_TILING_MACRO;
@@ -192,8 +192,13 @@
 	}
 #endif
 
-	if (tiling_flags)
-		radeon_object_set_tiling_flags(robj, tiling_flags | RADEON_TILING_SURFACE, mode_cmd.pitch);
+	if (tiling_flags) {
+		ret = radeon_bo_set_tiling_flags(rbo,
+					tiling_flags | RADEON_TILING_SURFACE,
+					mode_cmd.pitch);
+		if (ret)
+			dev_err(rdev->dev, "FB failed to set tiling flags\n");
+	}
 	mutex_lock(&rdev->ddev->struct_mutex);
 	fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
 	if (fb == NULL) {
@@ -201,10 +206,19 @@
 		ret = -ENOMEM;
 		goto out_unref;
 	}
-	ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
+	ret = radeon_bo_reserve(rbo, false);
+	if (unlikely(ret != 0))
+		goto out_unref;
+	ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
 	if (ret) {
-		printk(KERN_ERR "failed to pin framebuffer\n");
-		ret = -ENOMEM;
+		radeon_bo_unreserve(rbo);
+		goto out_unref;
+	}
+	if (fb_tiled)
+		radeon_bo_check_tiling(rbo, 0, 0);
+	ret = radeon_bo_kmap(rbo, &fbptr);
+	radeon_bo_unreserve(rbo);
+	if (ret) {
 		goto out_unref;
 	}
 
@@ -213,7 +227,7 @@
 	*fb_p = fb;
 	rfb = to_radeon_framebuffer(fb);
 	rdev->fbdev_rfb = rfb;
-	rdev->fbdev_robj = robj;
+	rdev->fbdev_rbo = rbo;
 
 	info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
 	if (info == NULL) {
@@ -234,15 +248,7 @@
 	if (ret)
 		goto out_unref;
 
-	if (fb_tiled)
-		radeon_object_check_tiling(robj, 0, 0);
-
-	ret = radeon_object_kmap(robj, &fbptr);
-	if (ret) {
-		goto out_unref;
-	}
-
-	memset_io(fbptr, 0, aligned_size);
+	memset_io(fbptr, 0xff, aligned_size);
 
 	strcpy(info->fix.id, "radeondrmfb");
 
@@ -288,8 +294,12 @@
 	return 0;
 
 out_unref:
-	if (robj) {
-		radeon_object_kunmap(robj);
+	if (rbo) {
+		ret = radeon_bo_reserve(rbo, false);
+		if (likely(ret == 0)) {
+			radeon_bo_kunmap(rbo);
+			radeon_bo_unreserve(rbo);
+		}
 	}
 	if (fb && ret) {
 		list_del(&fb->filp_head);
@@ -321,14 +331,22 @@
 
 int radeonfb_probe(struct drm_device *dev)
 {
-	return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create);
+	struct radeon_device *rdev = dev->dev_private;
+	int bpp_sel = 32;
+
+	/* select 8 bpp console on RN50 or cards with 32MB of VRAM or less */
+	if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32 * 1024 * 1024))
+		bpp_sel = 8;
+
+	return drm_fb_helper_single_fb_probe(dev, bpp_sel, &radeonfb_create);
 }
 
 int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
 {
 	struct fb_info *info;
 	struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
-	struct radeon_object *robj;
+	struct radeon_bo *rbo;
+	int r;
 
 	if (!fb) {
 		return -EINVAL;
@@ -336,10 +354,14 @@
 	info = fb->fbdev;
 	if (info) {
 		struct radeon_fb_device *rfbdev = info->par;
-		robj = rfb->obj->driver_private;
+		rbo = rfb->obj->driver_private;
 		unregister_framebuffer(info);
-		radeon_object_kunmap(robj);
-		radeon_object_unpin(robj);
+		r = radeon_bo_reserve(rbo, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rbo);
+			radeon_bo_unpin(rbo);
+			radeon_bo_unreserve(rbo);
+		}
 		drm_fb_helper_free(&rfbdev->helper);
 		framebuffer_release(info);
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 3beb26d..cb4cd97 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -168,37 +168,6 @@
 	return signaled;
 }
 
-int r600_fence_wait(struct radeon_fence *fence,  bool intr, bool lazy)
-{
-	struct radeon_device *rdev;
-	int ret = 0;
-
-	rdev = fence->rdev;
-
-	__set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
-
-	while (1) {
-		if (radeon_fence_signaled(fence))
-			break;
-
-		if (time_after_eq(jiffies, fence->timeout)) {
-			ret = -EBUSY;
-			break;
-		}
-
-		if (lazy)
-			schedule_timeout(1);
-
-		if (intr && signal_pending(current)) {
-			ret = -ERESTARTSYS;
-			break;
-		}
-	}
-	__set_current_state(TASK_RUNNING);
-	return ret;
-}
-
-
 int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 {
 	struct radeon_device *rdev;
@@ -216,13 +185,6 @@
 		return 0;
 	}
 
-	if (rdev->family >= CHIP_R600) {
-		r = r600_fence_wait(fence, intr, 0);
-		if (r == -ERESTARTSYS)
-			return -EBUSY;
-		return r;
-	}
-
 retry:
 	cur_jiffies = jiffies;
 	timeout = HZ / 100;
@@ -231,14 +193,17 @@
 	}
 
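+	/* hold the sw interrupt while waiting so the fence IRQ can wake us */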
 	if (intr) {
+		radeon_irq_kms_sw_irq_get(rdev);
 		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
 				radeon_fence_signaled(fence), timeout);
-		if (unlikely(r == -ERESTARTSYS)) {
-			return -EBUSY;
-		}
+		radeon_irq_kms_sw_irq_put(rdev);
+		if (unlikely(r < 0))
+			return r;
 	} else {
+		radeon_irq_kms_sw_irq_get(rdev);
 		r = wait_event_timeout(rdev->fence_drv.queue,
 			 radeon_fence_signaled(fence), timeout);
+		radeon_irq_kms_sw_irq_put(rdev);
 	}
 	if (unlikely(!radeon_fence_signaled(fence))) {
 		if (unlikely(r == 0)) {
diff --git a/drivers/gpu/drm/radeon/radeon_fixed.h b/drivers/gpu/drm/radeon/radeon_fixed.h
index 90187d1..3d4d84e 100644
--- a/drivers/gpu/drm/radeon/radeon_fixed.h
+++ b/drivers/gpu/drm/radeon/radeon_fixed.h
@@ -38,6 +38,23 @@
 #define fixed_init_half(A) { .full = rfixed_const_half((A)) }
 #define rfixed_trunc(A) ((A).full >> 12)
 
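+/* round a 20.12 fixed-point value down to a whole number (still in 20.12 form) */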
+static inline u32 rfixed_floor(fixed20_12 A)
+{
+	u32 non_frac = rfixed_trunc(A);
+
+	return rfixed_const(non_frac);
+}
+
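+/* round a 20.12 fixed-point value up to a whole number (still in 20.12 form) */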
+static inline u32 rfixed_ceil(fixed20_12 A)
+{
+	u32 non_frac = rfixed_trunc(A);
+
+	if (A.full > rfixed_const(non_frac))
+		return rfixed_const(non_frac + 1);
+	else
+		return rfixed_const(non_frac);
+}
+
 static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B)
 {
 	u64 tmp = ((u64)A.full << 13);
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index a68d756..e73d56e 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -78,11 +78,9 @@
 	int r;
 
 	if (rdev->gart.table.vram.robj == NULL) {
-		r = radeon_object_create(rdev, NULL,
-					 rdev->gart.table_size,
-					 true,
-					 RADEON_GEM_DOMAIN_VRAM,
-					 false, &rdev->gart.table.vram.robj);
+		r = radeon_bo_create(rdev, NULL, rdev->gart.table_size,
+					true, RADEON_GEM_DOMAIN_VRAM,
+					&rdev->gart.table.vram.robj);
 		if (r) {
 			return r;
 		}
@@ -95,32 +93,38 @@
 	uint64_t gpu_addr;
 	int r;
 
-	r = radeon_object_pin(rdev->gart.table.vram.robj,
-			      RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
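+	/* the BO must be reserved before it can be pinned or kmapped */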
+	r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rdev->gart.table.vram.robj,
+				RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 	if (r) {
-		radeon_object_unref(&rdev->gart.table.vram.robj);
+		radeon_bo_unreserve(rdev->gart.table.vram.robj);
 		return r;
 	}
-	r = radeon_object_kmap(rdev->gart.table.vram.robj,
-			       (void **)&rdev->gart.table.vram.ptr);
-	if (r) {
-		radeon_object_unpin(rdev->gart.table.vram.robj);
-		radeon_object_unref(&rdev->gart.table.vram.robj);
-		DRM_ERROR("radeon: failed to map gart vram table.\n");
-		return r;
-	}
+	r = radeon_bo_kmap(rdev->gart.table.vram.robj,
+				(void **)&rdev->gart.table.vram.ptr);
+	if (r)
+		radeon_bo_unpin(rdev->gart.table.vram.robj);
+	radeon_bo_unreserve(rdev->gart.table.vram.robj);
 	rdev->gart.table_addr = gpu_addr;
-	return 0;
+	return r;
 }
 
 void radeon_gart_table_vram_free(struct radeon_device *rdev)
 {
+	int r;
+
 	if (rdev->gart.table.vram.robj == NULL) {
 		return;
 	}
-	radeon_object_kunmap(rdev->gart.table.vram.robj);
-	radeon_object_unpin(rdev->gart.table.vram.robj);
-	radeon_object_unref(&rdev->gart.table.vram.robj);
+	r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+	if (likely(r == 0)) {
+		radeon_bo_kunmap(rdev->gart.table.vram.robj);
+		radeon_bo_unpin(rdev->gart.table.vram.robj);
+		radeon_bo_unreserve(rdev->gart.table.vram.robj);
+	}
+	radeon_bo_unref(&rdev->gart.table.vram.robj);
 }
 
 
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index d880edf..2944486 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -38,22 +38,21 @@
 
 void radeon_gem_object_free(struct drm_gem_object *gobj)
 {
-	struct radeon_object *robj = gobj->driver_private;
+	struct radeon_bo *robj = gobj->driver_private;
 
 	gobj->driver_private = NULL;
 	if (robj) {
-		radeon_object_unref(&robj);
+		radeon_bo_unref(&robj);
 	}
 }
 
 int radeon_gem_object_create(struct radeon_device *rdev, int size,
-			     int alignment, int initial_domain,
-			     bool discardable, bool kernel,
-			     bool interruptible,
-			     struct drm_gem_object **obj)
+				int alignment, int initial_domain,
+				bool discardable, bool kernel,
+				struct drm_gem_object **obj)
 {
 	struct drm_gem_object *gobj;
-	struct radeon_object *robj;
+	struct radeon_bo *robj;
 	int r;
 
 	*obj = NULL;
@@ -65,8 +64,7 @@
 	if (alignment < PAGE_SIZE) {
 		alignment = PAGE_SIZE;
 	}
-	r = radeon_object_create(rdev, gobj, size, kernel, initial_domain,
-				 interruptible, &robj);
+	r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj);
 	if (r) {
 		DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
 			  size, initial_domain, alignment);
@@ -83,33 +81,33 @@
 int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
 			  uint64_t *gpu_addr)
 {
-	struct radeon_object *robj = obj->driver_private;
-	uint32_t flags;
+	struct radeon_bo *robj = obj->driver_private;
+	int r;
 
-	switch (pin_domain) {
-	case RADEON_GEM_DOMAIN_VRAM:
-		flags = TTM_PL_FLAG_VRAM;
-		break;
-	case RADEON_GEM_DOMAIN_GTT:
-		flags = TTM_PL_FLAG_TT;
-		break;
-	default:
-		flags = TTM_PL_FLAG_SYSTEM;
-		break;
-	}
-	return radeon_object_pin(robj, flags, gpu_addr);
+	r = radeon_bo_reserve(robj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(robj, pin_domain, gpu_addr);
+	radeon_bo_unreserve(robj);
+	return r;
 }
 
 void radeon_gem_object_unpin(struct drm_gem_object *obj)
 {
-	struct radeon_object *robj = obj->driver_private;
-	radeon_object_unpin(robj);
+	struct radeon_bo *robj = obj->driver_private;
+	int r;
+
+	r = radeon_bo_reserve(robj, false);
+	if (likely(r == 0)) {
+		radeon_bo_unpin(robj);
+		radeon_bo_unreserve(robj);
+	}
 }
 
 int radeon_gem_set_domain(struct drm_gem_object *gobj,
 			  uint32_t rdomain, uint32_t wdomain)
 {
-	struct radeon_object *robj;
+	struct radeon_bo *robj;
 	uint32_t domain;
 	int r;
 
@@ -127,11 +125,12 @@
 	}
 	if (domain == RADEON_GEM_DOMAIN_CPU) {
 		/* Asking for cpu access wait for object idle */
-		r = radeon_object_wait(robj);
+		r = radeon_bo_wait(robj, NULL, false);
 		if (r) {
 			printk(KERN_ERR "Failed to wait for object !\n");
 			return r;
 		}
+		radeon_hdp_flush(robj->rdev);
 	}
 	return 0;
 }
@@ -144,7 +143,7 @@
 
 void radeon_gem_fini(struct radeon_device *rdev)
 {
-	radeon_object_force_delete(rdev);
+	radeon_bo_force_delete(rdev);
 }
 
 
@@ -158,9 +157,13 @@
 	struct drm_radeon_gem_info *args = data;
 
 	args->vram_size = rdev->mc.real_vram_size;
-	/* FIXME: report somethings that makes sense */
-	args->vram_visible = rdev->mc.real_vram_size - (4 * 1024 * 1024);
-	args->gart_size = rdev->mc.gtt_size;
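+	/* report what is left after the kernel's own persistent allocations
+	 * (stolen VGA memory and the fbdev buffer for VRAM; the ring and
+	 * IB pool for GTT)
+	 */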
+	args->vram_visible = rdev->mc.real_vram_size;
+	if (rdev->stollen_vga_memory)
+		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
+	if (rdev->fbdev_rbo)
+		args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo);
+	args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
+		RADEON_IB_POOL_SIZE * 64 * 1024;
 	return 0;
 }
 
@@ -192,8 +195,8 @@
 	/* create a gem object to contain this object in */
 	args->size = roundup(args->size, PAGE_SIZE);
 	r = radeon_gem_object_create(rdev, args->size, args->alignment,
-				     args->initial_domain, false,
-				     false, true, &gobj);
+					args->initial_domain, false,
+					false, &gobj);
 	if (r) {
 		return r;
 	}
@@ -218,7 +221,7 @@
 	 * just validate the BO into a certain domain */
 	struct drm_radeon_gem_set_domain *args = data;
 	struct drm_gem_object *gobj;
-	struct radeon_object *robj;
+	struct radeon_bo *robj;
 	int r;
 
 	/* for now if someone requests domain CPU -
@@ -244,19 +247,18 @@
 {
 	struct drm_radeon_gem_mmap *args = data;
 	struct drm_gem_object *gobj;
-	struct radeon_object *robj;
-	int r;
+	struct radeon_bo *robj;
 
 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
 	if (gobj == NULL) {
 		return -EINVAL;
 	}
 	robj = gobj->driver_private;
-	r = radeon_object_mmap(robj, &args->addr_ptr);
+	args->addr_ptr = radeon_bo_mmap_offset(robj);
 	mutex_lock(&dev->struct_mutex);
 	drm_gem_object_unreference(gobj);
 	mutex_unlock(&dev->struct_mutex);
-	return r;
+	return 0;
 }
 
 int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
@@ -264,16 +266,16 @@
 {
 	struct drm_radeon_gem_busy *args = data;
 	struct drm_gem_object *gobj;
-	struct radeon_object *robj;
+	struct radeon_bo *robj;
 	int r;
-	uint32_t cur_placement;
+	uint32_t cur_placement = 0;
 
 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
 	if (gobj == NULL) {
 		return -EINVAL;
 	}
 	robj = gobj->driver_private;
-	r = radeon_object_busy_domain(robj, &cur_placement);
+	r = radeon_bo_wait(robj, &cur_placement, true);
 	switch (cur_placement) {
 	case TTM_PL_VRAM:
 		args->domain = RADEON_GEM_DOMAIN_VRAM;
@@ -297,7 +299,7 @@
 {
 	struct drm_radeon_gem_wait_idle *args = data;
 	struct drm_gem_object *gobj;
-	struct radeon_object *robj;
+	struct radeon_bo *robj;
 	int r;
 
 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
@@ -305,10 +307,11 @@
 		return -EINVAL;
 	}
 	robj = gobj->driver_private;
-	r = radeon_object_wait(robj);
+	r = radeon_bo_wait(robj, NULL, false);
 	mutex_lock(&dev->struct_mutex);
 	drm_gem_object_unreference(gobj);
 	mutex_unlock(&dev->struct_mutex);
+	radeon_hdp_flush(robj->rdev);
 	return r;
 }
 
@@ -317,7 +320,7 @@
 {
 	struct drm_radeon_gem_set_tiling *args = data;
 	struct drm_gem_object *gobj;
-	struct radeon_object *robj;
+	struct radeon_bo *robj;
 	int r = 0;
 
 	DRM_DEBUG("%d \n", args->handle);
@@ -325,7 +328,7 @@
 	if (gobj == NULL)
 		return -EINVAL;
 	robj = gobj->driver_private;
-	radeon_object_set_tiling_flags(robj, args->tiling_flags, args->pitch);
+	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
 	mutex_lock(&dev->struct_mutex);
 	drm_gem_object_unreference(gobj);
 	mutex_unlock(&dev->struct_mutex);
@@ -337,16 +340,19 @@
 {
 	struct drm_radeon_gem_get_tiling *args = data;
 	struct drm_gem_object *gobj;
-	struct radeon_object *robj;
+	struct radeon_bo *rbo;
 	int r = 0;
 
 	DRM_DEBUG("\n");
 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
 	if (gobj == NULL)
 		return -EINVAL;
-	robj = gobj->driver_private;
-	radeon_object_get_tiling_flags(robj, &args->tiling_flags,
-				       &args->pitch);
+	rbo = gobj->driver_private;
+	r = radeon_bo_reserve(rbo, false);
+	if (unlikely(r != 0))
+		return r;
+	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
+	radeon_bo_unreserve(rbo);
 	mutex_lock(&dev->struct_mutex);
 	drm_gem_object_unreference(gobj);
 	mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index dd438d3..da3da1e 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -59,35 +59,43 @@
 }
 
 
-void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state)
+void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
 {
-	struct radeon_device *rdev = radeon_connector->base.dev->dev_private;
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
 	uint32_t temp;
-	struct radeon_i2c_bus_rec *rec = &radeon_connector->ddc_bus->rec;
 
 	/* RV410 appears to have a bug where the hw i2c in reset
 	 * holds the i2c port in a bad state - switch hw i2c away before
 	 * doing DDC - do this for all r200s/r300s/r400s for safety sake
 	 */
-	if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
-		if (rec->a_clk_reg == RADEON_GPIO_MONID) {
-			WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
-						R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
-		} else {
-			WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
-						R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
+	if (rec->hw_capable) {
+		if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
+			if (rec->a_clk_reg == RADEON_GPIO_MONID) {
+				WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
+							       R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
+			} else {
+				WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
+							       R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
+			}
 		}
 	}
-	if (lock_state) {
-		temp = RREG32(rec->a_clk_reg);
-		temp &= ~(rec->a_clk_mask);
-		WREG32(rec->a_clk_reg, temp);
 
-		temp = RREG32(rec->a_data_reg);
-		temp &= ~(rec->a_data_mask);
-		WREG32(rec->a_data_reg, temp);
-	}
+	/* clear the output pin values */
+	temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
+	WREG32(rec->a_clk_reg, temp);
 
+	temp = RREG32(rec->a_data_reg) & ~rec->a_data_mask;
+	WREG32(rec->a_data_reg, temp);
+
+	/* set the pins to input */
+	temp = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
+	WREG32(rec->en_clk_reg, temp);
+
+	temp = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
+	WREG32(rec->en_data_reg, temp);
+
+	/* mask the gpio pins for software use */
 	temp = RREG32(rec->mask_clk_reg);
 	if (lock_state)
 		temp |= rec->mask_clk_mask;
@@ -112,8 +120,9 @@
 	struct radeon_i2c_bus_rec *rec = &i2c->rec;
 	uint32_t val;
 
-	val = RREG32(rec->get_clk_reg);
-	val &= rec->get_clk_mask;
+	/* read the value off the pin */
+	val = RREG32(rec->y_clk_reg);
+	val &= rec->y_clk_mask;
 
 	return (val != 0);
 }
@@ -126,8 +135,10 @@
 	struct radeon_i2c_bus_rec *rec = &i2c->rec;
 	uint32_t val;
 
-	val = RREG32(rec->get_data_reg);
-	val &= rec->get_data_mask;
+	/* read the value off the pin */
+	val = RREG32(rec->y_data_reg);
+	val &= rec->y_data_mask;
+
 	return (val != 0);
 }
 
@@ -138,9 +149,10 @@
 	struct radeon_i2c_bus_rec *rec = &i2c->rec;
 	uint32_t val;
 
-	val = RREG32(rec->put_clk_reg) & (uint32_t)~(rec->put_clk_mask);
-	val |= clock ? 0 : rec->put_clk_mask;
-	WREG32(rec->put_clk_reg, val);
+	/* set pin direction */
+	val = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
+	val |= clock ? 0 : rec->en_clk_mask;
+	WREG32(rec->en_clk_reg, val);
 }
 
 static void set_data(void *i2c_priv, int data)
@@ -150,14 +162,15 @@
 	struct radeon_i2c_bus_rec *rec = &i2c->rec;
 	uint32_t val;
 
-	val = RREG32(rec->put_data_reg) & (uint32_t)~(rec->put_data_mask);
-	val |= data ? 0 : rec->put_data_mask;
-	WREG32(rec->put_data_reg, val);
+	/* set pin direction */
+	val = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
+	val |= data ? 0 : rec->en_data_mask;
+	WREG32(rec->en_data_reg, val);
 }
 
 struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
-		struct radeon_i2c_bus_rec *rec,
-		const char *name)
+					  struct radeon_i2c_bus_rec *rec,
+					  const char *name)
 {
 	struct radeon_i2c_chan *i2c;
 	int ret;
@@ -167,20 +180,19 @@
 		return NULL;
 
 	i2c->adapter.owner = THIS_MODULE;
-	i2c->adapter.algo_data = &i2c->algo;
 	i2c->dev = dev;
-	i2c->algo.setsda = set_data;
-	i2c->algo.setscl = set_clock;
-	i2c->algo.getsda = get_data;
-	i2c->algo.getscl = get_clock;
-	i2c->algo.udelay = 20;
+	i2c_set_adapdata(&i2c->adapter, i2c);
+	i2c->adapter.algo_data = &i2c->algo.bit;
+	i2c->algo.bit.setsda = set_data;
+	i2c->algo.bit.setscl = set_clock;
+	i2c->algo.bit.getsda = get_data;
+	i2c->algo.bit.getscl = get_clock;
+	i2c->algo.bit.udelay = 20;
 	/* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
 	 * make this, 2 jiffies is a lot more reliable */
-	i2c->algo.timeout = 2;
-	i2c->algo.data = i2c;
+	i2c->algo.bit.timeout = 2;
+	i2c->algo.bit.data = i2c;
 	i2c->rec = *rec;
-	i2c_set_adapdata(&i2c->adapter, i2c);
-
 	ret = i2c_bit_add_bus(&i2c->adapter);
 	if (ret) {
 		DRM_INFO("Failed to register i2c %s\n", name);
@@ -194,6 +206,38 @@
 
 }
 
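+/* Create an i2c adapter that tunnels transfers over the DisplayPort AUX
+ * channel (via radeon_dp_i2c_aux_ch) instead of bit-banging gpio pins.
+ */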
+struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
+					     struct radeon_i2c_bus_rec *rec,
+					     const char *name)
+{
+	struct radeon_i2c_chan *i2c;
+	int ret;
+
+	i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
+	if (i2c == NULL)
+		return NULL;
+
+	i2c->rec = *rec;
+	i2c->adapter.owner = THIS_MODULE;
+	i2c->dev = dev;
+	i2c_set_adapdata(&i2c->adapter, i2c);
+	i2c->adapter.algo_data = &i2c->algo.dp;
+	i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch;
+	i2c->algo.dp.address = 0;
+	ret = i2c_dp_aux_add_bus(&i2c->adapter);
+	if (ret) {
+		DRM_INFO("Failed to register i2c %s\n", name);
+		goto out_free;
+	}
+
+	return i2c;
+out_free:
+	kfree(i2c);
+	return NULL;
+}
+
 void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
 {
 	if (!i2c)
@@ -207,3 +251,59 @@
 {
 	return NULL;
 }
+
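+/* Read a single register over a bit-banged i2c bus: write the register
+ * address, then read one byte back in the same transfer.
+ */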
+void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
+			    u8 slave_addr,
+			    u8 addr,
+			    u8 *val)
+{
+	u8 out_buf[2];
+	u8 in_buf[2];
+	struct i2c_msg msgs[] = {
+		{
+			.addr = slave_addr,
+			.flags = 0,
+			.len = 1,
+			.buf = out_buf,
+		},
+		{
+			.addr = slave_addr,
+			.flags = I2C_M_RD,
+			.len = 1,
+			.buf = in_buf,
+		}
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = 0;
+
+	if (i2c_transfer(&i2c_bus->adapter, msgs, 2) == 2) {
+		*val = in_buf[0];
+		DRM_DEBUG("val = 0x%02x\n", *val);
+	} else {
+		DRM_ERROR("i2c 0x%02x 0x%02x read failed\n",
+			  addr, *val);
+	}
+}
+
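+/* Write a single register over a bit-banged i2c bus: register address
+ * byte followed by the value in one message.
+ */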
+void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c_bus,
+			    u8 slave_addr,
+			    u8 addr,
+			    u8 val)
+{
+	uint8_t out_buf[2];
+	struct i2c_msg msg = {
+		.addr = slave_addr,
+		.flags = 0,
+		.len = 2,
+		.buf = out_buf,
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = val;
+
+	if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
+		DRM_ERROR("i2c 0x%02x 0x%02x write failed\n",
+			  addr, val);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a0fe623..9223296 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -39,11 +39,32 @@
 	return radeon_irq_process(rdev);
 }
 
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+static void radeon_hotplug_work_func(struct work_struct *work)
+{
+	struct radeon_device *rdev = container_of(work, struct radeon_device,
+						  hotplug_work);
+	struct drm_device *dev = rdev->ddev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_connector *connector;
+
+	if (mode_config->num_connector) {
+		list_for_each_entry(connector, &mode_config->connector_list, head)
+			radeon_connector_hotplug(connector);
+	}
+	/* Just fire off a uevent and let userspace tell us what to do */
+	drm_sysfs_hotplug_event(dev);
+}
+
 void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	unsigned i;
 
+	INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+
 	/* Disable *all* interrupts */
 	rdev->irq.sw_int = false;
 	for (i = 0; i < 2; i++) {
@@ -87,17 +108,25 @@
 
 	if (rdev->flags & RADEON_SINGLE_CRTC)
 		num_crtc = 1;
-
+	spin_lock_init(&rdev->irq.sw_lock);
 	r = drm_vblank_init(rdev->ddev, num_crtc);
 	if (r) {
 		return r;
 	}
 	/* enable msi */
 	rdev->msi_enabled = 0;
-	if (rdev->family >= CHIP_RV380) {
+	/* MSIs don't seem to work on my rs780;
+	 * not sure about rs880 or other rs780s.
+	 * Needs more investigation.
+	 */
+	if ((rdev->family >= CHIP_RV380) &&
+	    (rdev->family != CHIP_RS780) &&
+	    (rdev->family != CHIP_RS880)) {
 		int ret = pci_enable_msi(rdev->pdev);
-		if (!ret)
+		if (!ret) {
 			rdev->msi_enabled = 1;
+			DRM_INFO("radeon: using MSI.\n");
+		}
 	}
 	drm_irq_install(rdev->ddev);
 	rdev->irq.installed = true;
@@ -114,3 +143,29 @@
 			pci_disable_msi(rdev->pdev);
 	}
 }
+
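+/* Refcounted enable of the CP software interrupt: only the first caller
+ * (tracked via sw_refcount under sw_lock) actually enables it in hardware.
+ */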
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
+	if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount == 1)) {
+		rdev->irq.sw_int = true;
+		radeon_irq_set(rdev);
+	}
+	spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
+}
+
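+/* Drop a software interrupt reference; the last caller disables it again. */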
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
+	BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount <= 0);
+	if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount == 0)) {
+		rdev->irq.sw_int = false;
+		radeon_irq_set(rdev);
+	}
+	spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index ba12862..f23b056 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -30,10 +30,19 @@
 #include "radeon.h"
 #include "radeon_drm.h"
 
+int radeon_driver_unload_kms(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
 
-/*
- * Driver load/unload
- */
+	if (rdev == NULL)
+		return 0;
+	radeon_modeset_fini(rdev);
+	radeon_device_fini(rdev);
+	kfree(rdev);
+	dev->dev_private = NULL;
+	return 0;
+}
+
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 {
 	struct radeon_device *rdev;
@@ -62,31 +71,20 @@
 	 */
 	r = radeon_device_init(rdev, dev, dev->pdev, flags);
 	if (r) {
-		DRM_ERROR("Fatal error while trying to initialize radeon.\n");
-		return r;
+		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
+		goto out;
 	}
 	/* Again modeset_init should fail only on fatal error
 	 * otherwise it should provide enough functionalities
 	 * for shadowfb to run
 	 */
 	r = radeon_modeset_init(rdev);
-	if (r) {
-		return r;
-	}
-	return 0;
-}
-
-int radeon_driver_unload_kms(struct drm_device *dev)
-{
-	struct radeon_device *rdev = dev->dev_private;
-
-	if (rdev == NULL)
-		return 0;
-	radeon_modeset_fini(rdev);
-	radeon_device_fini(rdev);
-	kfree(rdev);
-	dev->dev_private = NULL;
-	return 0;
+	if (r)
+		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
+out:
+	if (r)
+		radeon_driver_unload_kms(dev);
+	return r;
 }
 
 
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 8d0b7aa..b82ede9 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -30,6 +30,18 @@
 #include "radeon.h"
 #include "atom.h"
 
+static void radeon_overscan_setup(struct drm_crtc *crtc,
+				  struct drm_display_mode *mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
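+	/* disable the overscan borders: clear the border color and zero the widths */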
+	WREG32(RADEON_OVR_CLR + radeon_crtc->crtc_offset, 0);
+	WREG32(RADEON_OVR_WID_LEFT_RIGHT + radeon_crtc->crtc_offset, 0);
+	WREG32(RADEON_OVR_WID_TOP_BOTTOM + radeon_crtc->crtc_offset, 0);
+}
+
 static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
 				       struct drm_display_mode *mode,
 				       struct drm_display_mode *adjusted_mode)
@@ -292,8 +304,7 @@
 	uint32_t mask;
 
 	if (radeon_crtc->crtc_id)
-		mask = (RADEON_CRTC2_EN |
-			RADEON_CRTC2_DISP_DIS |
+		mask = (RADEON_CRTC2_DISP_DIS |
 			RADEON_CRTC2_VSYNC_DIS |
 			RADEON_CRTC2_HSYNC_DIS |
 			RADEON_CRTC2_DISP_REQ_EN_B);
@@ -305,7 +316,7 @@
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
 		if (radeon_crtc->crtc_id)
-			WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~mask);
+			WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask));
 		else {
 			WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN |
 									 RADEON_CRTC_DISP_REQ_EN_B));
@@ -319,7 +330,7 @@
 	case DRM_MODE_DPMS_OFF:
 		drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
 		if (radeon_crtc->crtc_id)
-			WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask);
+			WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
 		else {
 			WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN |
 										    RADEON_CRTC_DISP_REQ_EN_B));
@@ -400,14 +411,21 @@
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct radeon_framebuffer *radeon_fb;
 	struct drm_gem_object *obj;
+	struct radeon_bo *rbo;
 	uint64_t base;
 	uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0;
 	uint32_t crtc_pitch, pitch_pixels;
 	uint32_t tiling_flags;
 	int format;
 	uint32_t gen_cntl_reg, gen_cntl_val;
+	int r;
 
 	DRM_DEBUG("\n");
+	/* no fb bound */
+	if (!crtc->fb) {
+		DRM_DEBUG("No FB bound\n");
+		return 0;
+	}
 
 	radeon_fb = to_radeon_framebuffer(crtc->fb);
 
@@ -431,10 +449,22 @@
 		return false;
 	}
 
+	/* Pin framebuffer & get tiling information */
 	obj = radeon_fb->obj;
-	if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) {
+	rbo = obj->driver_private;
+	r = radeon_bo_reserve(rbo, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
+	if (unlikely(r != 0)) {
+		radeon_bo_unreserve(rbo);
 		return -EINVAL;
 	}
+	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+	radeon_bo_unreserve(rbo);
+	if (tiling_flags & RADEON_TILING_MICRO)
+		DRM_ERROR("trying to scanout microtiled buffer\n");
+
 	/* if scanout was in GTT this really wouldn't work */
 	/* crtc offset is from display base addr not FB location */
 	radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location;
@@ -449,10 +479,6 @@
 		       (crtc->fb->bits_per_pixel * 8));
 	crtc_pitch |= crtc_pitch << 16;
 
-	radeon_object_get_tiling_flags(obj->driver_private,
-				       &tiling_flags, NULL);
-	if (tiling_flags & RADEON_TILING_MICRO)
-		DRM_ERROR("trying to scanout microtiled buffer\n");
 
 	if (tiling_flags & RADEON_TILING_MACRO) {
 		if (ASIC_IS_R300(rdev))
@@ -530,7 +556,12 @@
 
 	if (old_fb && old_fb != crtc->fb) {
 		radeon_fb = to_radeon_framebuffer(old_fb);
-		radeon_gem_object_unpin(radeon_fb->obj);
+		rbo = radeon_fb->obj->driver_private;
+		r = radeon_bo_reserve(rbo, false);
+		if (unlikely(r != 0))
+			return r;
+		radeon_bo_unpin(rbo);
+		radeon_bo_unreserve(rbo);
 	}
 
 	/* Bytes per pixel may have changed */
@@ -642,12 +673,8 @@
 		uint32_t crtc2_gen_cntl;
 		uint32_t disp2_merge_cntl;
 
-		/* check to see if TV DAC is enabled for another crtc and keep it enabled */
-		if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_CRT2_ON)
-			crtc2_gen_cntl = RADEON_CRTC2_CRT2_ON;
-		else
-			crtc2_gen_cntl = 0;
-
+		/* if the TV DAC is enabled for another crtc, keep it enabled */
+		crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0x00718080;
 		crtc2_gen_cntl |= ((format << 8)
 				   | RADEON_CRTC2_VSYNC_DIS
 				   | RADEON_CRTC2_HSYNC_DIS
@@ -676,7 +703,8 @@
 		uint32_t crtc_ext_cntl;
 		uint32_t disp_merge_cntl;
 
-		crtc_gen_cntl = (RADEON_CRTC_EXT_DISP_EN
+		crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0x00718000;
+		crtc_gen_cntl |= (RADEON_CRTC_EXT_DISP_EN
 				 | (format << 8)
 				 | RADEON_CRTC_DISP_REQ_EN_B
 				 | ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -779,15 +807,17 @@
 			if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
 				pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
 			if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
-				struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-				struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
-				if (lvds) {
-					if (lvds->use_bios_dividers) {
-						pll_ref_div = lvds->panel_ref_divider;
-						pll_fb_post_div   = (lvds->panel_fb_divider |
-								     (lvds->panel_post_divider << 16));
-						htotal_cntl  = 0;
-						use_bios_divs = true;
+				if (!rdev->is_atom_bios) {
+					struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+					struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
+					if (lvds) {
+						if (lvds->use_bios_dividers) {
+							pll_ref_div = lvds->panel_ref_divider;
+							pll_fb_post_div   = (lvds->panel_fb_divider |
+									     (lvds->panel_post_divider << 16));
+							htotal_cntl  = 0;
+							use_bios_divs = true;
+						}
 					}
 				}
 				pll_flags |= RADEON_PLL_USE_REF_DIV;
@@ -1027,6 +1057,7 @@
 	radeon_crtc_set_base(crtc, x, y, old_fb);
 	radeon_set_crtc_timing(crtc, adjusted_mode);
 	radeon_set_pll(crtc, adjusted_mode);
+	radeon_overscan_setup(crtc, adjusted_mode);
 	if (radeon_crtc->crtc_id == 0) {
 		radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode);
 	} else {
@@ -1042,12 +1073,29 @@
 
 static void radeon_crtc_prepare(struct drm_crtc *crtc)
 {
-	radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc *crtci;
+
+	/*
+	 * The hardware wedges sometimes if you reconfigure one CRTC
+	 * whilst another is running (see fdo bug #24611).
+	 */
+	list_for_each_entry(crtci, &dev->mode_config.crtc_list, head)
+		radeon_crtc_dpms(crtci, DRM_MODE_DPMS_OFF);
 }
 
 static void radeon_crtc_commit(struct drm_crtc *crtc)
 {
-	radeon_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc *crtci;
+
+	/*
+	 * Reenable the CRTCs that should be running.
+	 */
+	list_for_each_entry(crtci, &dev->mode_config.crtc_list, head) {
+		if (crtci->enabled)
+			radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
+	}
 }
 
 static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 0038212..df00515 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -136,7 +136,14 @@
 	lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN;
 
 	lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL);
-	if ((!rdev->is_atom_bios)) {
+	if (rdev->is_atom_bios) {
+		/* LVDS_GEN_CNTL parameters are computed in LVDSEncoderControl;
+		 * we need to call that on resume to set up the reg properly.
+		 */
+		radeon_encoder->pixel_clock = adjusted_mode->clock;
+		atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
+		lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+	} else {
 		struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
 		if (lvds) {
 			DRM_DEBUG("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl);
@@ -147,8 +154,7 @@
 					     (lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
 		} else
 			lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
-	} else
-		lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+	}
 	lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
 	lvds_gen_cntl &= ~(RADEON_LVDS_ON |
 			   RADEON_LVDS_BLON |
@@ -184,9 +190,9 @@
 		radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
 }
 
-static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder,
-					  struct drm_display_mode *mode,
-					  struct drm_display_mode *adjusted_mode)
+static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
+				     struct drm_display_mode *mode,
+				     struct drm_display_mode *adjusted_mode)
 {
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 
@@ -194,15 +200,22 @@
 	radeon_encoder_set_active_device(encoder);
 	drm_mode_set_crtcinfo(adjusted_mode, 0);
 
-	if (radeon_encoder->rmx_type != RMX_OFF)
-		radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
+	/* get the native mode for LVDS */
+	if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
+		struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+		int mode_id = adjusted_mode->base.id;
+		*adjusted_mode = *native_mode;
+		adjusted_mode->hdisplay = mode->hdisplay;
+		adjusted_mode->vdisplay = mode->vdisplay;
+		adjusted_mode->base.id = mode_id;
+	}
 
 	return true;
 }
 
 static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
 	.dpms = radeon_legacy_lvds_dpms,
-	.mode_fixup = radeon_legacy_lvds_mode_fixup,
+	.mode_fixup = radeon_legacy_mode_fixup,
 	.prepare = radeon_legacy_lvds_prepare,
 	.mode_set = radeon_legacy_lvds_mode_set,
 	.commit = radeon_legacy_lvds_commit,
@@ -214,17 +227,6 @@
 	.destroy = radeon_enc_destroy,
 };
 
-static bool radeon_legacy_primary_dac_mode_fixup(struct drm_encoder *encoder,
-						 struct drm_display_mode *mode,
-						 struct drm_display_mode *adjusted_mode)
-{
-	/* set the active encoder to connector routing */
-	radeon_encoder_set_active_device(encoder);
-	drm_mode_set_crtcinfo(adjusted_mode, 0);
-
-	return true;
-}
-
 static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
@@ -410,7 +412,7 @@
 
 static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = {
 	.dpms = radeon_legacy_primary_dac_dpms,
-	.mode_fixup = radeon_legacy_primary_dac_mode_fixup,
+	.mode_fixup = radeon_legacy_mode_fixup,
 	.prepare = radeon_legacy_primary_dac_prepare,
 	.mode_set = radeon_legacy_primary_dac_mode_set,
 	.commit = radeon_legacy_primary_dac_commit,
@@ -423,16 +425,6 @@
 	.destroy = radeon_enc_destroy,
 };
 
-static bool radeon_legacy_tmds_int_mode_fixup(struct drm_encoder *encoder,
-					      struct drm_display_mode *mode,
-					      struct drm_display_mode *adjusted_mode)
-{
-
-	drm_mode_set_crtcinfo(adjusted_mode, 0);
-
-	return true;
-}
-
 static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
@@ -584,7 +576,7 @@
 
 static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = {
 	.dpms = radeon_legacy_tmds_int_dpms,
-	.mode_fixup = radeon_legacy_tmds_int_mode_fixup,
+	.mode_fixup = radeon_legacy_mode_fixup,
 	.prepare = radeon_legacy_tmds_int_prepare,
 	.mode_set = radeon_legacy_tmds_int_mode_set,
 	.commit = radeon_legacy_tmds_int_commit,
@@ -596,17 +588,6 @@
 	.destroy = radeon_enc_destroy,
 };
 
-static bool radeon_legacy_tmds_ext_mode_fixup(struct drm_encoder *encoder,
-					      struct drm_display_mode *mode,
-					      struct drm_display_mode *adjusted_mode)
-{
-	/* set the active encoder to connector routing */
-	radeon_encoder_set_active_device(encoder);
-	drm_mode_set_crtcinfo(adjusted_mode, 0);
-
-	return true;
-}
-
 static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
@@ -697,6 +678,8 @@
 			/*if (mode->clock > 165000)
 			  fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/
 		}
+		if (!radeon_combios_external_tmds_setup(encoder))
+			radeon_external_tmds_setup(encoder);
 	}
 
 	if (radeon_crtc->crtc_id == 0) {
@@ -724,9 +707,22 @@
 		radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
 }
 
+static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+	if (tmds) {
+		if (tmds->i2c_bus)
+			radeon_i2c_destroy(tmds->i2c_bus);
+	}
+	kfree(radeon_encoder->enc_priv);
+	drm_encoder_cleanup(encoder);
+	kfree(radeon_encoder);
+}
+
 static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = {
 	.dpms = radeon_legacy_tmds_ext_dpms,
-	.mode_fixup = radeon_legacy_tmds_ext_mode_fixup,
+	.mode_fixup = radeon_legacy_mode_fixup,
 	.prepare = radeon_legacy_tmds_ext_prepare,
 	.mode_set = radeon_legacy_tmds_ext_mode_set,
 	.commit = radeon_legacy_tmds_ext_commit,
@@ -735,20 +731,9 @@
 
 
 static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = {
-	.destroy = radeon_enc_destroy,
+	.destroy = radeon_ext_tmds_enc_destroy,
 };
 
-static bool radeon_legacy_tv_dac_mode_fixup(struct drm_encoder *encoder,
-					    struct drm_display_mode *mode,
-					    struct drm_display_mode *adjusted_mode)
-{
-	/* set the active encoder to connector routing */
-	radeon_encoder_set_active_device(encoder);
-	drm_mode_set_crtcinfo(adjusted_mode, 0);
-
-	return true;
-}
-
 static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
@@ -1265,7 +1250,7 @@
 
 static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = {
 	.dpms = radeon_legacy_tv_dac_dpms,
-	.mode_fixup = radeon_legacy_tv_dac_mode_fixup,
+	.mode_fixup = radeon_legacy_mode_fixup,
 	.prepare = radeon_legacy_tv_dac_prepare,
 	.mode_set = radeon_legacy_tv_dac_mode_set,
 	.commit = radeon_legacy_tv_dac_commit,
@@ -1302,6 +1287,29 @@
 	return tmds;
 }
 
+static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct radeon_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder_ext_tmds *tmds = NULL;
+	bool ret;
+
+	if (rdev->is_atom_bios)
+		return NULL;
+
+	tmds = kzalloc(sizeof(struct radeon_encoder_ext_tmds), GFP_KERNEL);
+
+	if (!tmds)
+		return NULL;
+
+	ret = radeon_legacy_get_ext_tmds_info_from_combios(encoder, tmds);
+
+	if (!ret)
+		radeon_legacy_get_ext_tmds_info_from_table(encoder, tmds);
+
+	return tmds;
+}
+
 void
 radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
 {
@@ -1329,7 +1337,6 @@
 		encoder->possible_crtcs = 0x1;
 	else
 		encoder->possible_crtcs = 0x3;
-	encoder->possible_clones = 0;
 
 	radeon_encoder->enc_priv = NULL;
 
@@ -1373,7 +1380,7 @@
 		drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS);
 		drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs);
 		if (!rdev->is_atom_bios)
-			radeon_combios_get_ext_tmds_info(radeon_encoder);
+			radeon_encoder->enc_priv = radeon_legacy_get_ext_tmds_info(radeon_encoder);
 		break;
 	}
 }
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index ace726a..44d4b65 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -33,6 +33,7 @@
 #include <drm_crtc.h>
 #include <drm_mode.h>
 #include <drm_edid.h>
+#include <drm_dp_helper.h>
 #include <linux/i2c.h>
 #include <linux/i2c-id.h>
 #include <linux/i2c-algo-bit.h>
@@ -89,24 +90,45 @@
 	TV_STD_PAL_CN,
 };
 
+/* radeon gpio-based i2c
+ * 1. "mask" reg and bits
+ *    grabs the gpio pins for software use
+ *    0=not held  1=held
+ * 2. "a" reg and bits
+ *    output pin value
+ *    0=low 1=high
+ * 3. "en" reg and bits
+ *    sets the pin direction
+ *    0=input 1=output
+ * 4. "y" reg and bits
+ *    input pin value
+ *    0=low 1=high
+ */
 struct radeon_i2c_bus_rec {
 	bool valid;
+	/* id used by atom */
+	uint8_t i2c_id;
+	/* can be used with hw i2c engine */
+	bool hw_capable;
+	/* uses multi-media i2c engine */
+	bool mm_i2c;
+	/* regs and bits */
 	uint32_t mask_clk_reg;
 	uint32_t mask_data_reg;
 	uint32_t a_clk_reg;
 	uint32_t a_data_reg;
-	uint32_t put_clk_reg;
-	uint32_t put_data_reg;
-	uint32_t get_clk_reg;
-	uint32_t get_data_reg;
+	uint32_t en_clk_reg;
+	uint32_t en_data_reg;
+	uint32_t y_clk_reg;
+	uint32_t y_data_reg;
 	uint32_t mask_clk_mask;
 	uint32_t mask_data_mask;
-	uint32_t put_clk_mask;
-	uint32_t put_data_mask;
-	uint32_t get_clk_mask;
-	uint32_t get_data_mask;
 	uint32_t a_clk_mask;
 	uint32_t a_data_mask;
+	uint32_t en_clk_mask;
+	uint32_t en_data_mask;
+	uint32_t y_clk_mask;
+	uint32_t y_data_mask;
 };
 
 struct radeon_tmds_pll {
@@ -150,9 +172,12 @@
 };
 
 struct radeon_i2c_chan {
-	struct drm_device *dev;
 	struct i2c_adapter adapter;
-	struct i2c_algo_bit_data algo;
+	struct drm_device *dev;
+	union {
+		struct i2c_algo_dp_aux_data dp;
+		struct i2c_algo_bit_data bit;
+	} algo;
 	struct radeon_i2c_bus_rec rec;
 };
 
@@ -170,6 +195,11 @@
 	CT_EMAC,
 };
 
+enum radeon_dvo_chip {
+	DVO_SIL164,
+	DVO_SIL1178,
+};
+
 struct radeon_mode_info {
 	struct atom_context *atom_context;
 	struct card_info *atom_card_info;
@@ -261,6 +291,13 @@
 	struct radeon_tmds_pll tmds_pll[4];
 };
 
+struct radeon_encoder_ext_tmds {
+	/* tmds over dvo */
+	struct radeon_i2c_chan *i2c_bus;
+	uint8_t slave_addr;
+	enum radeon_dvo_chip dvo_chip;
+};
+
 /* spread spectrum */
 struct radeon_atom_ss {
 	uint16_t percentage;
@@ -302,6 +339,35 @@
 struct radeon_connector_atom_dig {
 	uint32_t igp_lane_info;
 	bool linkb;
+	/* displayport */
+	struct radeon_i2c_chan *dp_i2c_bus;
+	u8 dpcd[8];
+	u8 dp_sink_type;
+	int dp_clock;
+	int dp_lane_count;
+};
+
+struct radeon_gpio_rec {
+	bool valid;
+	u8 id;
+	u32 reg;
+	u32 mask;
+};
+
+enum radeon_hpd_id {
+	RADEON_HPD_NONE = 0,
+	RADEON_HPD_1,
+	RADEON_HPD_2,
+	RADEON_HPD_3,
+	RADEON_HPD_4,
+	RADEON_HPD_5,
+	RADEON_HPD_6,
+};
+
+struct radeon_hpd {
+	enum radeon_hpd_id hpd;
+	u8 plugged_state;
+	struct radeon_gpio_rec gpio;
 };
 
 struct radeon_connector {
@@ -318,6 +384,7 @@
 	void *con_priv;
 	bool dac_load_detect;
 	uint16_t connector_object_id;
+	struct radeon_hpd hpd;
 };
 
 struct radeon_framebuffer {
@@ -325,10 +392,37 @@
 	struct drm_gem_object *obj;
 };
 
+extern void radeon_connector_hotplug(struct drm_connector *connector);
+extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
+extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
+				       struct drm_display_mode *mode);
+extern void radeon_dp_set_link_config(struct drm_connector *connector,
+				      struct drm_display_mode *mode);
+extern void dp_link_train(struct drm_encoder *encoder,
+			  struct drm_connector *connector);
+extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
+extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
+extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
+					   int action, uint8_t lane_num,
+					   uint8_t lane_set);
+extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+				uint8_t write_byte, uint8_t *read_byte);
+
+extern struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
+						    struct radeon_i2c_bus_rec *rec,
+						    const char *name);
 extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
 						 struct radeon_i2c_bus_rec *rec,
 						 const char *name);
 extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
+extern void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
+				   u8 slave_addr,
+				   u8 addr,
+				   u8 *val);
+extern void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c,
+				   u8 slave_addr,
+				   u8 addr,
+				   u8 val);
 extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
 extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
 
@@ -343,12 +437,24 @@
 			       uint32_t *post_div_p,
 			       int flags);
 
+extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
+				     uint64_t freq,
+				     uint32_t *dot_clock_p,
+				     uint32_t *fb_div_p,
+				     uint32_t *frac_fb_div_p,
+				     uint32_t *ref_div_p,
+				     uint32_t *post_div_p,
+				     int flags);
+
+extern void radeon_setup_encoder_clones(struct drm_device *dev);
+
 struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
 struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv);
 struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv);
 struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index);
 struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index);
 extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action);
+extern void atombios_digital_setup(struct drm_encoder *encoder, int action);
 extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
 extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
 
@@ -378,12 +484,16 @@
 extern bool radeon_combios_get_clock_info(struct drm_device *dev);
 extern struct radeon_encoder_atom_dig *
 radeon_atombios_get_lvds_info(struct radeon_encoder *encoder);
-bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
-				   struct radeon_encoder_int_tmds *tmds);
-bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
-					   struct radeon_encoder_int_tmds *tmds);
-bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
-					    struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
+					  struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
+						     struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
+						   struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder,
+							 struct radeon_encoder_ext_tmds *tmds);
+extern bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
+						       struct radeon_encoder_ext_tmds *tmds);
 extern struct radeon_encoder_primary_dac *
 radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder);
 extern struct radeon_encoder_tv_dac *
@@ -395,6 +505,8 @@
 radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder);
 extern struct radeon_encoder_primary_dac *
 radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder);
+extern bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder);
+extern void radeon_external_tmds_setup(struct drm_encoder *encoder);
 extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock);
 extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev);
 extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock);
@@ -426,16 +538,13 @@
 			       struct radeon_crtc *radeon_crtc);
 void radeon_legacy_init_crtc(struct drm_device *dev,
 			     struct radeon_crtc *radeon_crtc);
-void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state);
+extern void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state);
 
 void radeon_get_clock_info(struct drm_device *dev);
 
 extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev);
 extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev);
 
-void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
-			   struct drm_display_mode *mode,
-			   struct drm_display_mode *adjusted_mode);
 void radeon_enc_destroy(struct drm_encoder *encoder);
 void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
 void radeon_combios_asic_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1f056da..544e18f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -34,100 +34,53 @@
 #include "radeon_drm.h"
 #include "radeon.h"
 
-struct radeon_object {
-	struct ttm_buffer_object	tobj;
-	struct list_head		list;
-	struct radeon_device		*rdev;
-	struct drm_gem_object		*gobj;
-	struct ttm_bo_kmap_obj		kmap;
-	unsigned			pin_count;
-	uint64_t			gpu_addr;
-	void				*kptr;
-	bool				is_iomem;
-	uint32_t			tiling_flags;
-	uint32_t			pitch;
-	int				surface_reg;
-};
 
 int radeon_ttm_init(struct radeon_device *rdev);
 void radeon_ttm_fini(struct radeon_device *rdev);
+static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
 
 /*
  * To exclude mutual BO access we rely on bo_reserve exclusion, as all
  * function are calling it.
  */
 
-static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
+static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 {
-	return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
+	struct radeon_bo *bo;
+
+	bo = container_of(tbo, struct radeon_bo, tbo);
+	mutex_lock(&bo->rdev->gem.mutex);
+	list_del_init(&bo->list);
+	mutex_unlock(&bo->rdev->gem.mutex);
+	radeon_bo_clear_surface_reg(bo);
+	kfree(bo);
 }
 
-static void radeon_object_unreserve(struct radeon_object *robj)
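+/* Translate a RADEON_GEM_DOMAIN_* mask into the TTM placement list stored on @rbo. */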
+void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 {
-	ttm_bo_unreserve(&robj->tobj);
+	u32 c = 0;
+
+	rbo->placement.fpfn = 0;
+	rbo->placement.lpfn = 0;
+	rbo->placement.placement = rbo->placements;
+	rbo->placement.busy_placement = rbo->placements;
+	if (domain & RADEON_GEM_DOMAIN_VRAM)
+		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+					TTM_PL_FLAG_VRAM;
+	if (domain & RADEON_GEM_DOMAIN_GTT)
+		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	if (domain & RADEON_GEM_DOMAIN_CPU)
+		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	rbo->placement.num_placement = c;
+	rbo->placement.num_busy_placement = c;
 }
 
-static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
+int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
+			unsigned long size, bool kernel, u32 domain,
+			struct radeon_bo **bo_ptr)
 {
-	struct radeon_object *robj;
-
-	robj = container_of(tobj, struct radeon_object, tobj);
-	list_del_init(&robj->list);
-	radeon_object_clear_surface_reg(robj);
-	kfree(robj);
-}
-
-static inline void radeon_object_gpu_addr(struct radeon_object *robj)
-{
-	/* Default gpu address */
-	robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
-	if (robj->tobj.mem.mm_node == NULL) {
-		return;
-	}
-	robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
-	switch (robj->tobj.mem.mem_type) {
-	case TTM_PL_VRAM:
-		robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
-		break;
-	case TTM_PL_TT:
-		robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
-		break;
-	default:
-		DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
-		robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
-		return;
-	}
-}
-
-static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
-{
-	uint32_t flags = 0;
-	if (domain & RADEON_GEM_DOMAIN_VRAM) {
-		flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
-	}
-	if (domain & RADEON_GEM_DOMAIN_GTT) {
-		flags |= TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
-	}
-	if (domain & RADEON_GEM_DOMAIN_CPU) {
-		flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
-	}
-	if (!flags) {
-		flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
-	}
-	return flags;
-}
-
-int radeon_object_create(struct radeon_device *rdev,
-			 struct drm_gem_object *gobj,
-			 unsigned long size,
-			 bool kernel,
-			 uint32_t domain,
-			 bool interruptible,
-			 struct radeon_object **robj_ptr)
-{
-	struct radeon_object *robj;
+	struct radeon_bo *bo;
 	enum ttm_bo_type type;
-	uint32_t flags;
 	int r;
 
 	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
@@ -138,206 +91,125 @@
 	} else {
 		type = ttm_bo_type_device;
 	}
-	*robj_ptr = NULL;
-	robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
-	if (robj == NULL) {
+	*bo_ptr = NULL;
+	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
+	if (bo == NULL)
 		return -ENOMEM;
-	}
-	robj->rdev = rdev;
-	robj->gobj = gobj;
-	robj->surface_reg = -1;
-	INIT_LIST_HEAD(&robj->list);
+	bo->rdev = rdev;
+	bo->gobj = gobj;
+	bo->surface_reg = -1;
+	INIT_LIST_HEAD(&bo->list);
 
-	flags = radeon_object_flags_from_domain(domain);
-	r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
-				   0, 0, false, NULL, size,
-				   &radeon_ttm_object_object_destroy);
+	radeon_ttm_placement_from_domain(bo, domain);
+	/* Kernel allocations are uninterruptible */
+	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
+			&bo->placement, 0, 0, !kernel, NULL, size,
+			&radeon_ttm_bo_destroy);
 	if (unlikely(r != 0)) {
-		/* ttm call radeon_ttm_object_object_destroy if error happen */
-		DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
-			  size, flags, 0);
+		if (r != -ERESTARTSYS)
+			dev_err(rdev->dev,
+				"object_init failed for (%lu, 0x%08X)\n",
+				size, domain);
 		return r;
 	}
-	*robj_ptr = robj;
+	*bo_ptr = bo;
 	if (gobj) {
-		list_add_tail(&robj->list, &rdev->gem.objects);
+		mutex_lock(&bo->rdev->gem.mutex);
+		list_add_tail(&bo->list, &rdev->gem.objects);
+		mutex_unlock(&bo->rdev->gem.mutex);
 	}
 	return 0;
 }
 
-int radeon_object_kmap(struct radeon_object *robj, void **ptr)
+int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
 {
+	bool is_iomem;
 	int r;
 
-	spin_lock(&robj->tobj.lock);
-	if (robj->kptr) {
+	if (bo->kptr) {
 		if (ptr) {
-			*ptr = robj->kptr;
+			*ptr = bo->kptr;
 		}
-		spin_unlock(&robj->tobj.lock);
 		return 0;
 	}
-	spin_unlock(&robj->tobj.lock);
-	r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
+	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
 	if (r) {
 		return r;
 	}
-	spin_lock(&robj->tobj.lock);
-	robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
-	spin_unlock(&robj->tobj.lock);
+	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
 	if (ptr) {
-		*ptr = robj->kptr;
+		*ptr = bo->kptr;
 	}
-	radeon_object_check_tiling(robj, 0, 0);
+	radeon_bo_check_tiling(bo, 0, 0);
 	return 0;
 }
 
-void radeon_object_kunmap(struct radeon_object *robj)
+void radeon_bo_kunmap(struct radeon_bo *bo)
 {
-	spin_lock(&robj->tobj.lock);
-	if (robj->kptr == NULL) {
-		spin_unlock(&robj->tobj.lock);
+	if (bo->kptr == NULL)
 		return;
-	}
-	robj->kptr = NULL;
-	spin_unlock(&robj->tobj.lock);
-	radeon_object_check_tiling(robj, 0, 0);
-	ttm_bo_kunmap(&robj->kmap);
+	bo->kptr = NULL;
+	radeon_bo_check_tiling(bo, 0, 0);
+	ttm_bo_kunmap(&bo->kmap);
 }
 
-void radeon_object_unref(struct radeon_object **robj)
+void radeon_bo_unref(struct radeon_bo **bo)
 {
-	struct ttm_buffer_object *tobj;
+	struct ttm_buffer_object *tbo;
 
-	if ((*robj) == NULL) {
+	if ((*bo) == NULL)
 		return;
-	}
-	tobj = &((*robj)->tobj);
-	ttm_bo_unref(&tobj);
-	if (tobj == NULL) {
-		*robj = NULL;
-	}
+	tbo = &((*bo)->tbo);
+	ttm_bo_unref(&tbo);
+	if (tbo == NULL)
+		*bo = NULL;
 }
 
-int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
+int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
 {
-	*offset = robj->tobj.addr_space_offset;
-	return 0;
-}
+	int r, i;
 
-int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
-		      uint64_t *gpu_addr)
-{
-	uint32_t flags;
-	uint32_t tmp;
-	int r;
-
-	flags = radeon_object_flags_from_domain(domain);
-	spin_lock(&robj->tobj.lock);
-	if (robj->pin_count) {
-		robj->pin_count++;
-		if (gpu_addr != NULL) {
-			*gpu_addr = robj->gpu_addr;
-		}
-		spin_unlock(&robj->tobj.lock);
+	radeon_ttm_placement_from_domain(bo, domain);
+	if (bo->pin_count) {
+		bo->pin_count++;
+		if (gpu_addr)
+			*gpu_addr = radeon_bo_gpu_offset(bo);
 		return 0;
 	}
-	spin_unlock(&robj->tobj.lock);
-	r = radeon_object_reserve(robj, false);
-	if (unlikely(r != 0)) {
-		DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
-		return r;
+	radeon_ttm_placement_from_domain(bo, domain);
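+	/* a pinned buffer must never be evicted, so mark every allowed placement NO_EVICT */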
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	if (likely(r == 0)) {
+		bo->pin_count = 1;
+		if (gpu_addr != NULL)
+			*gpu_addr = radeon_bo_gpu_offset(bo);
 	}
-	tmp = robj->tobj.mem.placement;
-	ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
-	robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
-	r = ttm_buffer_object_validate(&robj->tobj,
-				       robj->tobj.proposed_placement,
-				       false, false);
-	radeon_object_gpu_addr(robj);
-	if (gpu_addr != NULL) {
-		*gpu_addr = robj->gpu_addr;
-	}
-	robj->pin_count = 1;
-	if (unlikely(r != 0)) {
-		DRM_ERROR("radeon: failed to pin object.\n");
-	}
-	radeon_object_unreserve(robj);
+	if (unlikely(r != 0))
+		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
 	return r;
 }
 
-void radeon_object_unpin(struct radeon_object *robj)
+int radeon_bo_unpin(struct radeon_bo *bo)
 {
-	uint32_t flags;
-	int r;
+	int r, i;
 
-	spin_lock(&robj->tobj.lock);
-	if (!robj->pin_count) {
-		spin_unlock(&robj->tobj.lock);
-		printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
-		return;
+	if (!bo->pin_count) {
+		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
+		return 0;
 	}
-	robj->pin_count--;
-	if (robj->pin_count) {
-		spin_unlock(&robj->tobj.lock);
-		return;
-	}
-	spin_unlock(&robj->tobj.lock);
-	r = radeon_object_reserve(robj, false);
-	if (unlikely(r != 0)) {
-		DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
-		return;
-	}
-	flags = robj->tobj.mem.placement;
-	robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
-	r = ttm_buffer_object_validate(&robj->tobj,
-				       robj->tobj.proposed_placement,
-				       false, false);
-	if (unlikely(r != 0)) {
-		DRM_ERROR("radeon: failed to unpin buffer.\n");
-	}
-	radeon_object_unreserve(robj);
-}
-
-int radeon_object_wait(struct radeon_object *robj)
-{
-	int r = 0;
-
-	/* FIXME: should use block reservation instead */
-	r = radeon_object_reserve(robj, true);
-	if (unlikely(r != 0)) {
-		DRM_ERROR("radeon: failed to reserve object for waiting.\n");
-		return r;
-	}
-	spin_lock(&robj->tobj.lock);
-	if (robj->tobj.sync_obj) {
-		r = ttm_bo_wait(&robj->tobj, true, true, false);
-	}
-	spin_unlock(&robj->tobj.lock);
-	radeon_object_unreserve(robj);
+	bo->pin_count--;
+	if (bo->pin_count)
+		return 0;
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	if (unlikely(r != 0))
+		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
 	return r;
 }
 
-int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement)
-{
-	int r = 0;
-
-	r = radeon_object_reserve(robj, true);
-	if (unlikely(r != 0)) {
-		DRM_ERROR("radeon: failed to reserve object for waiting.\n");
-		return r;
-	}
-	spin_lock(&robj->tobj.lock);
-	*cur_placement = robj->tobj.mem.mem_type;
-	if (robj->tobj.sync_obj) {
-		r = ttm_bo_wait(&robj->tobj, true, true, true);
-	}
-	spin_unlock(&robj->tobj.lock);
-	radeon_object_unreserve(robj);
-	return r;
-}
-
-int radeon_object_evict_vram(struct radeon_device *rdev)
+int radeon_bo_evict_vram(struct radeon_device *rdev)
 {
 	if (rdev->flags & RADEON_IS_IGP) {
 		/* Useless to evict on IGP chips */
@@ -346,30 +218,32 @@
 	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
 }
 
-void radeon_object_force_delete(struct radeon_device *rdev)
+void radeon_bo_force_delete(struct radeon_device *rdev)
 {
-	struct radeon_object *robj, *n;
+	struct radeon_bo *bo, *n;
 	struct drm_gem_object *gobj;
 
 	if (list_empty(&rdev->gem.objects)) {
 		return;
 	}
-	DRM_ERROR("Userspace still has active objects !\n");
-	list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
+	dev_err(rdev->dev, "Userspace still has active objects !\n");
+	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
 		mutex_lock(&rdev->ddev->struct_mutex);
-		gobj = robj->gobj;
-		DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
-			  gobj, robj, (unsigned long)gobj->size,
-			  *((unsigned long *)&gobj->refcount));
-		list_del_init(&robj->list);
-		radeon_object_unref(&robj);
+		gobj = bo->gobj;
+		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
+			gobj, bo, (unsigned long)gobj->size,
+			*((unsigned long *)&gobj->refcount));
+		mutex_lock(&bo->rdev->gem.mutex);
+		list_del_init(&bo->list);
+		mutex_unlock(&bo->rdev->gem.mutex);
+		radeon_bo_unref(&bo);
 		gobj->driver_private = NULL;
 		drm_gem_object_unreference(gobj);
 		mutex_unlock(&rdev->ddev->struct_mutex);
 	}
 }
 
-int radeon_object_init(struct radeon_device *rdev)
+int radeon_bo_init(struct radeon_device *rdev)
 {
 	/* Add an MTRR for the VRAM */
 	rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
@@ -382,13 +256,13 @@
 	return radeon_ttm_init(rdev);
 }
 
-void radeon_object_fini(struct radeon_device *rdev)
+void radeon_bo_fini(struct radeon_device *rdev)
 {
 	radeon_ttm_fini(rdev);
 }
 
-void radeon_object_list_add_object(struct radeon_object_list *lobj,
-				   struct list_head *head)
+void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
+				struct list_head *head)
 {
 	if (lobj->wdomain) {
 		list_add(&lobj->list, head);
@@ -397,72 +271,62 @@
 	}
 }
 
-int radeon_object_list_reserve(struct list_head *head)
+int radeon_bo_list_reserve(struct list_head *head)
 {
-	struct radeon_object_list *lobj;
+	struct radeon_bo_list *lobj;
 	int r;
 
 	list_for_each_entry(lobj, head, list){
-		if (!lobj->robj->pin_count) {
-			r = radeon_object_reserve(lobj->robj, true);
-			if (unlikely(r != 0)) {
-				DRM_ERROR("radeon: failed to reserve object.\n");
-				return r;
-			}
-		} else {
-		}
+		r = radeon_bo_reserve(lobj->bo, false);
+		if (unlikely(r != 0))
+			return r;
 	}
 	return 0;
 }
 
-void radeon_object_list_unreserve(struct list_head *head)
+void radeon_bo_list_unreserve(struct list_head *head)
 {
-	struct radeon_object_list *lobj;
+	struct radeon_bo_list *lobj;
 
 	list_for_each_entry(lobj, head, list) {
-		if (!lobj->robj->pin_count) {
-			radeon_object_unreserve(lobj->robj);
-		}
+		/* only unreserve objects we successfully reserved */
+		if (radeon_bo_is_reserved(lobj->bo))
+			radeon_bo_unreserve(lobj->bo);
 	}
 }
 
-int radeon_object_list_validate(struct list_head *head, void *fence)
+int radeon_bo_list_validate(struct list_head *head, void *fence)
 {
-	struct radeon_object_list *lobj;
-	struct radeon_object *robj;
+	struct radeon_bo_list *lobj;
+	struct radeon_bo *bo;
 	struct radeon_fence *old_fence = NULL;
 	int r;
 
-	r = radeon_object_list_reserve(head);
+	r = radeon_bo_list_reserve(head);
 	if (unlikely(r != 0)) {
-		radeon_object_list_unreserve(head);
 		return r;
 	}
 	list_for_each_entry(lobj, head, list) {
-		robj = lobj->robj;
-		if (!robj->pin_count) {
+		bo = lobj->bo;
+		if (!bo->pin_count) {
 			if (lobj->wdomain) {
-				robj->tobj.proposed_placement =
-					radeon_object_flags_from_domain(lobj->wdomain);
+				radeon_ttm_placement_from_domain(bo,
+								lobj->wdomain);
 			} else {
-				robj->tobj.proposed_placement =
-					radeon_object_flags_from_domain(lobj->rdomain);
+				radeon_ttm_placement_from_domain(bo,
+								lobj->rdomain);
 			}
-			r = ttm_buffer_object_validate(&robj->tobj,
-						       robj->tobj.proposed_placement,
-						       true, false);
-			if (unlikely(r)) {
-				DRM_ERROR("radeon: failed to validate.\n");
+			r = ttm_bo_validate(&bo->tbo, &bo->placement,
+						true, false);
+			if (unlikely(r))
 				return r;
-			}
-			radeon_object_gpu_addr(robj);
 		}
-		lobj->gpu_offset = robj->gpu_addr;
-		lobj->tiling_flags = robj->tiling_flags;
+		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
+		lobj->tiling_flags = bo->tiling_flags;
 		if (fence) {
-			old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
-			robj->tobj.sync_obj = radeon_fence_ref(fence);
-			robj->tobj.sync_obj_arg = NULL;
+			old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
+			bo->tbo.sync_obj = radeon_fence_ref(fence);
+			bo->tbo.sync_obj_arg = NULL;
 		}
 		if (old_fence) {
 			radeon_fence_unref(&old_fence);
@@ -471,51 +335,44 @@
 	return 0;
 }
 
-void radeon_object_list_unvalidate(struct list_head *head)
+void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
 {
-	struct radeon_object_list *lobj;
-	struct radeon_fence *old_fence = NULL;
+	struct radeon_bo_list *lobj;
+	struct radeon_fence *old_fence;
 
-	list_for_each_entry(lobj, head, list) {
-		old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
-		lobj->robj->tobj.sync_obj = NULL;
-		if (old_fence) {
-			radeon_fence_unref(&old_fence);
+	if (fence)
+		list_for_each_entry(lobj, head, list) {
+			old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
+			if (old_fence == fence) {
+				lobj->bo->tbo.sync_obj = NULL;
+				radeon_fence_unref(&old_fence);
+			}
 		}
-	}
-	radeon_object_list_unreserve(head);
+	radeon_bo_list_unreserve(head);
 }
 
-void radeon_object_list_clean(struct list_head *head)
-{
-	radeon_object_list_unreserve(head);
-}
-
-int radeon_object_fbdev_mmap(struct radeon_object *robj,
+int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
 			     struct vm_area_struct *vma)
 {
-	return ttm_fbdev_mmap(vma, &robj->tobj);
+	return ttm_fbdev_mmap(vma, &bo->tbo);
 }
 
-unsigned long radeon_object_size(struct radeon_object *robj)
+int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 {
-	return robj->tobj.num_pages << PAGE_SHIFT;
-}
-
-int radeon_object_get_surface_reg(struct radeon_object *robj)
-{
-	struct radeon_device *rdev = robj->rdev;
+	struct radeon_device *rdev = bo->rdev;
 	struct radeon_surface_reg *reg;
-	struct radeon_object *old_object;
+	struct radeon_bo *old_object;
 	int steal;
 	int i;
 
-	if (!robj->tiling_flags)
+	BUG_ON(!atomic_read(&bo->tbo.reserved));
+
+	if (!bo->tiling_flags)
 		return 0;
 
-	if (robj->surface_reg >= 0) {
-		reg = &rdev->surface_regs[robj->surface_reg];
-		i = robj->surface_reg;
+	if (bo->surface_reg >= 0) {
+		reg = &rdev->surface_regs[bo->surface_reg];
+		i = bo->surface_reg;
 		goto out;
 	}
 
@@ -523,10 +380,10 @@
 	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
 
 		reg = &rdev->surface_regs[i];
-		if (!reg->robj)
+		if (!reg->bo)
 			break;
 
-		old_object = reg->robj;
+		old_object = reg->bo;
 		if (old_object->pin_count == 0)
 			steal = i;
 	}
@@ -537,91 +394,101 @@
 			return -ENOMEM;
 		/* find someone with a surface reg and nuke their BO */
 		reg = &rdev->surface_regs[steal];
-		old_object = reg->robj;
+		old_object = reg->bo;
 		/* blow away the mapping */
 		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
-		ttm_bo_unmap_virtual(&old_object->tobj);
+		ttm_bo_unmap_virtual(&old_object->tbo);
 		old_object->surface_reg = -1;
 		i = steal;
 	}
 
-	robj->surface_reg = i;
-	reg->robj = robj;
+	bo->surface_reg = i;
+	reg->bo = bo;
 
 out:
-	radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch,
-			       robj->tobj.mem.mm_node->start << PAGE_SHIFT,
-			       robj->tobj.num_pages << PAGE_SHIFT);
+	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
+			       bo->tbo.mem.mm_node->start << PAGE_SHIFT,
+			       bo->tbo.num_pages << PAGE_SHIFT);
 	return 0;
 }
 
-void radeon_object_clear_surface_reg(struct radeon_object *robj)
+static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
 {
-	struct radeon_device *rdev = robj->rdev;
+	struct radeon_device *rdev = bo->rdev;
 	struct radeon_surface_reg *reg;
 
-	if (robj->surface_reg == -1)
+	if (bo->surface_reg == -1)
 		return;
 
-	reg = &rdev->surface_regs[robj->surface_reg];
-	radeon_clear_surface_reg(rdev, robj->surface_reg);
+	reg = &rdev->surface_regs[bo->surface_reg];
+	radeon_clear_surface_reg(rdev, bo->surface_reg);
 
-	reg->robj = NULL;
-	robj->surface_reg = -1;
+	reg->bo = NULL;
+	bo->surface_reg = -1;
 }
 
-void radeon_object_set_tiling_flags(struct radeon_object *robj,
-				    uint32_t tiling_flags, uint32_t pitch)
+int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
+				uint32_t tiling_flags, uint32_t pitch)
 {
-	robj->tiling_flags = tiling_flags;
-	robj->pitch = pitch;
+	int r;
+
+	r = radeon_bo_reserve(bo, false);
+	if (unlikely(r != 0))
+		return r;
+	bo->tiling_flags = tiling_flags;
+	bo->pitch = pitch;
+	radeon_bo_unreserve(bo);
+	return 0;
 }
 
-void radeon_object_get_tiling_flags(struct radeon_object *robj,
-				    uint32_t *tiling_flags,
-				    uint32_t *pitch)
+void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
+				uint32_t *tiling_flags,
+				uint32_t *pitch)
 {
+	BUG_ON(!atomic_read(&bo->tbo.reserved));
 	if (tiling_flags)
-		*tiling_flags = robj->tiling_flags;
+		*tiling_flags = bo->tiling_flags;
 	if (pitch)
-		*pitch = robj->pitch;
+		*pitch = bo->pitch;
 }
 
-int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
-			       bool force_drop)
+int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
+				bool force_drop)
 {
-	if (!(robj->tiling_flags & RADEON_TILING_SURFACE))
+	BUG_ON(!atomic_read(&bo->tbo.reserved));
+
+	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
 		return 0;
 
 	if (force_drop) {
-		radeon_object_clear_surface_reg(robj);
+		radeon_bo_clear_surface_reg(bo);
 		return 0;
 	}
 
-	if (robj->tobj.mem.mem_type != TTM_PL_VRAM) {
+	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
 		if (!has_moved)
 			return 0;
 
-		if (robj->surface_reg >= 0)
-			radeon_object_clear_surface_reg(robj);
+		if (bo->surface_reg >= 0)
+			radeon_bo_clear_surface_reg(bo);
 		return 0;
 	}
 
-	if ((robj->surface_reg >= 0) && !has_moved)
+	if ((bo->surface_reg >= 0) && !has_moved)
 		return 0;
 
-	return radeon_object_get_surface_reg(robj);
+	return radeon_bo_get_surface_reg(bo);
 }
 
 void radeon_bo_move_notify(struct ttm_buffer_object *bo,
-			  struct ttm_mem_reg *mem)
+				struct ttm_mem_reg *mem)
 {
-	struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
-	radeon_object_check_tiling(robj, 0, 1);
+	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+	radeon_bo_check_tiling(rbo, 0, 1);
 }
 
 void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
-	struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
-	radeon_object_check_tiling(robj, 0, 0);
+	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+	radeon_bo_check_tiling(rbo, 0, 0);
 }
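
Note on the radeon_object.c rework above: the old code serialized kptr and
pin_count with the per-object spinlock, while the new radeon_bo code relies
on TTM's reservation instead, so callers must now reserve a bo before
pinning or mapping it. A minimal sketch of the caller-side pattern (error
handling trimmed; this mirrors the radeon_ring.c hunks later in this diff
and is not itself part of the patch; size and rdev are assumed in scope):

	struct radeon_bo *bo;
	u64 gpu_addr;
	void *cpu_ptr;
	int r;

	r = radeon_bo_create(rdev, NULL, size, true,
			     RADEON_GEM_DOMAIN_GTT, &bo);
	if (r)
		return r;
	r = radeon_bo_reserve(bo, false);	/* may sleep */
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
	if (r == 0)
		r = radeon_bo_kmap(bo, &cpu_ptr);
	radeon_bo_unreserve(bo);	/* bo stays pinned and mapped */
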
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 10e8af6..f6b69c2 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -28,19 +28,152 @@
 #ifndef __RADEON_OBJECT_H__
 #define __RADEON_OBJECT_H__
 
-#include <ttm/ttm_bo_api.h>
-#include <ttm/ttm_bo_driver.h>
-#include <ttm/ttm_placement.h>
-#include <ttm/ttm_module.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
 
-/*
- * TTM.
+/**
+ * radeon_mem_type_to_domain - return domain corresponding to mem_type
+ * @mem_type:	ttm memory type
+ *
+ * Returns corresponding domain of the ttm mem_type
  */
-struct radeon_mman {
-	struct ttm_bo_global_ref        bo_global_ref;
-	struct ttm_global_reference	mem_global_ref;
-	bool				mem_global_referenced;
-	struct ttm_bo_device		bdev;
-};
+static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
+{
+	switch (mem_type) {
+	case TTM_PL_VRAM:
+		return RADEON_GEM_DOMAIN_VRAM;
+	case TTM_PL_TT:
+		return RADEON_GEM_DOMAIN_GTT;
+	case TTM_PL_SYSTEM:
+		return RADEON_GEM_DOMAIN_CPU;
+	default:
+		break;
+	}
+	return 0;
+}
 
+/**
+ * radeon_bo_reserve - reserve bo
+ * @bo:		bo structure
+ * @no_wait:		don't sleep while trying to reserve (return -EBUSY)
+ *
+ * Returns:
+ * -EBUSY: buffer is busy and @no_wait is true.
+ * Note: an interrupted wait (-ERESTART) is retried internally, so it is
+ * never returned to the caller.
+ */
+static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
+{
+	int r;
+
+retry:
+	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+	if (unlikely(r != 0)) {
+		if (r == -ERESTART)
+			goto retry;
+		dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
+		return r;
+	}
+	return 0;
+}
+
+static inline void radeon_bo_unreserve(struct radeon_bo *bo)
+{
+	ttm_bo_unreserve(&bo->tbo);
+}
+
+/**
+ * radeon_bo_gpu_offset - return GPU offset of bo
+ * @bo:	radeon object for which we query the offset
+ *
+ * Returns current GPU offset of the object.
+ *
+ * Note: the object should either be pinned or reserved when calling this
+ * function; it might be useful to add a check for this for debugging.
+ */
+static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
+{
+	return bo->tbo.offset;
+}
+
+static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
+{
+	return bo->tbo.num_pages << PAGE_SHIFT;
+}
+
+static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
+{
+	return !!atomic_read(&bo->tbo.reserved);
+}
+
+/**
+ * radeon_bo_mmap_offset - return mmap offset of bo
+ * @bo:	radeon object for which we query the offset
+ *
+ * Returns mmap offset of the object.
+ *
+ * Note: addr_space_offset is constant after ttm bo init and thus isn't
+ * protected by any lock.
+ */
+static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
+{
+	return bo->tbo.addr_space_offset;
+}
+
+static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
+					bool no_wait)
+{
+	int r;
+
+retry:
+	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+	if (unlikely(r != 0)) {
+		if (r == -ERESTART)
+			goto retry;
+		dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
+		return r;
+	}
+	spin_lock(&bo->tbo.lock);
+	if (mem_type)
+		*mem_type = bo->tbo.mem.mem_type;
+	if (bo->tbo.sync_obj)
+		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+	spin_unlock(&bo->tbo.lock);
+	ttm_bo_unreserve(&bo->tbo);
+	if (unlikely(r == -ERESTART))
+		goto retry;
+	return r;
+}
+
+extern int radeon_bo_create(struct radeon_device *rdev,
+				struct drm_gem_object *gobj, unsigned long size,
+				bool kernel, u32 domain,
+				struct radeon_bo **bo_ptr);
+extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
+extern void radeon_bo_kunmap(struct radeon_bo *bo);
+extern void radeon_bo_unref(struct radeon_bo **bo);
+extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
+extern int radeon_bo_unpin(struct radeon_bo *bo);
+extern int radeon_bo_evict_vram(struct radeon_device *rdev);
+extern void radeon_bo_force_delete(struct radeon_device *rdev);
+extern int radeon_bo_init(struct radeon_device *rdev);
+extern void radeon_bo_fini(struct radeon_device *rdev);
+extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
+				struct list_head *head);
+extern int radeon_bo_list_reserve(struct list_head *head);
+extern void radeon_bo_list_unreserve(struct list_head *head);
+extern int radeon_bo_list_validate(struct list_head *head, void *fence);
+extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
+extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
+				struct vm_area_struct *vma);
+extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
+				u32 tiling_flags, u32 pitch);
+extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
+				u32 *tiling_flags, u32 *pitch);
+extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
+				bool force_drop);
+extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
+					struct ttm_mem_reg *mem);
+extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
 #endif
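
The new header keeps the cheap queries inline. One point worth calling out:
radeon_bo_wait() reserves the bo itself and retries -ERESTART, so it can be
called bare. An illustrative helper (not from this patch) built only on the
inlines declared above:

	/* report where a bo currently lives and whether the GPU is done
	 * with it; mem_type comes back as a TTM_PL_* value, which
	 * radeon_mem_type_to_domain() turns into a GEM domain mask */
	static int radeon_bo_busy_info(struct radeon_bo *bo, u32 *domain)
	{
		u32 mem_type;
		int r;

		r = radeon_bo_wait(bo, &mem_type, true /* no_wait */);
		*domain = radeon_mem_type_to_domain(mem_type);
		return r;	/* -EBUSY if the GPU is still using it */
	}
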
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 46146c6..34b08d3 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -27,7 +27,7 @@
 int radeon_pm_init(struct radeon_device *rdev)
 {
 	if (radeon_debugfs_pm_init(rdev)) {
-		DRM_ERROR("Failed to register debugfs file for CP !\n");
+		DRM_ERROR("Failed to register debugfs file for PM!\n");
 	}
 
 	return 0;
@@ -44,8 +44,8 @@
 	struct drm_device *dev = node->minor->dev;
 	struct radeon_device *rdev = dev->dev_private;
 
-	seq_printf(m, "engine clock: %u0 Hz\n", radeon_get_engine_clock(rdev));
-	seq_printf(m, "memory clock: %u0 Hz\n", radeon_get_memory_clock(rdev));
+	seq_printf(m, "engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
+	seq_printf(m, "memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 29ab759..6d0a009 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -887,6 +887,7 @@
 #       define RADEON_FP_PANEL_FORMAT          (1 <<  3)
 #       define RADEON_FP_EN_TMDS               (1 <<  7)
 #       define RADEON_FP_DETECT_SENSE          (1 <<  8)
+#       define RADEON_FP_DETECT_INT_POL        (1 <<  9)
 #       define R200_FP_SOURCE_SEL_MASK         (3 <<  10)
 #       define R200_FP_SOURCE_SEL_CRTC1        (0 <<  10)
 #       define R200_FP_SOURCE_SEL_CRTC2        (1 <<  10)
@@ -894,6 +895,7 @@
 #       define R200_FP_SOURCE_SEL_TRANS        (3 <<  10)
 #       define RADEON_FP_SEL_CRTC1             (0 << 13)
 #       define RADEON_FP_SEL_CRTC2             (1 << 13)
+#       define R300_HPD_SEL(x)                 ((x) << 13)
 #       define RADEON_FP_CRTC_DONT_SHADOW_HPAR (1 << 15)
 #       define RADEON_FP_CRTC_DONT_SHADOW_VPAR (1 << 16)
 #       define RADEON_FP_CRTC_DONT_SHADOW_HEND (1 << 17)
@@ -909,6 +911,7 @@
 #       define RADEON_FP2_ON                   (1 <<  2)
 #       define RADEON_FP2_PANEL_FORMAT         (1 <<  3)
 #       define RADEON_FP2_DETECT_SENSE         (1 <<  8)
+#       define RADEON_FP2_DETECT_INT_POL       (1 <<  9)
 #       define R200_FP2_SOURCE_SEL_MASK        (3 << 10)
 #       define R200_FP2_SOURCE_SEL_CRTC1       (0 << 10)
 #       define R200_FP2_SOURCE_SEL_CRTC2       (1 << 10)
@@ -988,14 +991,20 @@
 
 #define RADEON_GEN_INT_CNTL                 0x0040
 #	define RADEON_CRTC_VBLANK_MASK		(1 << 0)
+#	define RADEON_FP_DETECT_MASK		(1 << 4)
 #	define RADEON_CRTC2_VBLANK_MASK		(1 << 9)
+#	define RADEON_FP2_DETECT_MASK		(1 << 10)
 #	define RADEON_SW_INT_ENABLE		(1 << 25)
 #define RADEON_GEN_INT_STATUS               0x0044
 #	define AVIVO_DISPLAY_INT_STATUS		(1 << 0)
 #	define RADEON_CRTC_VBLANK_STAT		(1 << 0)
 #	define RADEON_CRTC_VBLANK_STAT_ACK	(1 << 0)
+#	define RADEON_FP_DETECT_STAT		(1 << 4)
+#	define RADEON_FP_DETECT_STAT_ACK	(1 << 4)
 #	define RADEON_CRTC2_VBLANK_STAT		(1 << 9)
 #	define RADEON_CRTC2_VBLANK_STAT_ACK	(1 << 9)
+#	define RADEON_FP2_DETECT_STAT		(1 << 10)
+#	define RADEON_FP2_DETECT_STAT_ACK	(1 << 10)
 #	define RADEON_SW_INT_FIRE		(1 << 26)
 #	define RADEON_SW_INT_TEST		(1 << 25)
 #	define RADEON_SW_INT_TEST_ACK		(1 << 25)
@@ -1051,20 +1060,25 @@
 
        /* Multimedia I2C bus */
 #define RADEON_I2C_CNTL_0		    0x0090
-#define RADEON_I2C_DONE (1<<0)
-#define RADEON_I2C_NACK (1<<1)
-#define RADEON_I2C_HALT (1<<2)
-#define RADEON_I2C_SOFT_RST (1<<5)
-#define RADEON_I2C_DRIVE_EN (1<<6)
-#define RADEON_I2C_DRIVE_SEL (1<<7)
-#define RADEON_I2C_START (1<<8)
-#define RADEON_I2C_STOP (1<<9)
-#define RADEON_I2C_RECEIVE (1<<10)
-#define RADEON_I2C_ABORT (1<<11)
-#define RADEON_I2C_GO (1<<12)
+#define RADEON_I2C_DONE                     (1 << 0)
+#define RADEON_I2C_NACK                     (1 << 1)
+#define RADEON_I2C_HALT                     (1 << 2)
+#define RADEON_I2C_SOFT_RST                 (1 << 5)
+#define RADEON_I2C_DRIVE_EN                 (1 << 6)
+#define RADEON_I2C_DRIVE_SEL                (1 << 7)
+#define RADEON_I2C_START                    (1 << 8)
+#define RADEON_I2C_STOP                     (1 << 9)
+#define RADEON_I2C_RECEIVE                  (1 << 10)
+#define RADEON_I2C_ABORT                    (1 << 11)
+#define RADEON_I2C_GO                       (1 << 12)
+#define RADEON_I2C_PRESCALE_SHIFT           16
 #define RADEON_I2C_CNTL_1                   0x0094
-#define RADEON_I2C_SEL         (1<<16)
-#define RADEON_I2C_EN          (1<<17)
+#define RADEON_I2C_DATA_COUNT_SHIFT         0
+#define RADEON_I2C_ADDR_COUNT_SHIFT         4
+#define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT   8
+#define RADEON_I2C_SEL                      (1 << 16)
+#define RADEON_I2C_EN                       (1 << 17)
+#define RADEON_I2C_TIME_LIMIT_SHIFT         24
 #define RADEON_I2C_DATA			    0x0098
 
 #define RADEON_DVI_I2C_CNTL_0		    0x02e0
@@ -1072,7 +1086,7 @@
 #       define R200_SEL_DDC1                0 /* 0x60 - VGA_DDC */
 #       define R200_SEL_DDC2                1 /* 0x64 - DVI_DDC */
 #       define R200_SEL_DDC3                2 /* 0x68 - MONID_DDC */
-#define RADEON_DVI_I2C_CNTL_1               0x02e4 /* ? */
+#define RADEON_DVI_I2C_CNTL_1               0x02e4
 #define RADEON_DVI_I2C_DATA		    0x02e8
 
 #define RADEON_INTERRUPT_LINE               0x0f3c /* PCI */
@@ -1143,15 +1157,16 @@
 #       define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13)
 #       define RADEON_MC_MCLK_DYN_ENABLE    (1 << 14)
 #       define RADEON_IO_MCLK_DYN_ENABLE    (1 << 15)
-#define RADEON_LCD_GPIO_MASK                0x01a0
-#define RADEON_GPIOPAD_EN                   0x01a0
-#define RADEON_LCD_GPIO_Y_REG               0x01a4
-#define RADEON_MDGPIO_A_REG                 0x01ac
-#define RADEON_MDGPIO_EN_REG                0x01b0
-#define RADEON_MDGPIO_MASK                  0x0198
+
 #define RADEON_GPIOPAD_MASK                 0x0198
 #define RADEON_GPIOPAD_A		    0x019c
-#define RADEON_MDGPIO_Y_REG                 0x01b4
+#define RADEON_GPIOPAD_EN                   0x01a0
+#define RADEON_GPIOPAD_Y                    0x01a4
+#define RADEON_MDGPIO_MASK                  0x01a8
+#define RADEON_MDGPIO_A                     0x01ac
+#define RADEON_MDGPIO_EN                    0x01b0
+#define RADEON_MDGPIO_Y                     0x01b4
+
 #define RADEON_MEM_ADDR_CONFIG              0x0148
 #define RADEON_MEM_BASE                     0x0f10 /* PCI */
 #define RADEON_MEM_CNTL                     0x0140
@@ -1360,6 +1375,9 @@
 #define RADEON_OVR_CLR                      0x0230
 #define RADEON_OVR_WID_LEFT_RIGHT           0x0234
 #define RADEON_OVR_WID_TOP_BOTTOM           0x0238
+#define RADEON_OVR2_CLR                     0x0330
+#define RADEON_OVR2_WID_LEFT_RIGHT          0x0334
+#define RADEON_OVR2_WID_TOP_BOTTOM          0x0338
 
 /* first capture unit */
 
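
The reworked I2C defines above add shift constants for the multi-bit
fields. Purely to illustrate how they compose (the field values here are
made up, not taken from the patch), a two-byte transfer setup might look
like:

	/* hypothetical: select the MM bus, allow 2 data bytes and 1
	 * address byte, with a mid-range time limit */
	WREG32(RADEON_I2C_CNTL_1,
	       (2 << RADEON_I2C_DATA_COUNT_SHIFT) |
	       (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
	       RADEON_I2C_SEL | RADEON_I2C_EN |
	       (0x30 << RADEON_I2C_TIME_LIMIT_SHIFT));
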
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 747b4bf..4d12b2d 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -165,19 +165,24 @@
 		return 0;
 	/* Allocate 1M object buffer */
 	INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
-	r = radeon_object_create(rdev, NULL,  RADEON_IB_POOL_SIZE*64*1024,
-				 true, RADEON_GEM_DOMAIN_GTT,
-				 false, &rdev->ib_pool.robj);
+	r = radeon_bo_create(rdev, NULL,  RADEON_IB_POOL_SIZE*64*1024,
+				true, RADEON_GEM_DOMAIN_GTT,
+				&rdev->ib_pool.robj);
 	if (r) {
 		DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
 		return r;
 	}
-	r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
+	r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
 	if (r) {
+		radeon_bo_unreserve(rdev->ib_pool.robj);
 		DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
 		return r;
 	}
-	r = radeon_object_kmap(rdev->ib_pool.robj, &ptr);
+	r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
+	radeon_bo_unreserve(rdev->ib_pool.robj);
 	if (r) {
 		DRM_ERROR("radeon: failed to map ib poll (%d).\n", r);
 		return r;
@@ -203,14 +208,21 @@
 
 void radeon_ib_pool_fini(struct radeon_device *rdev)
 {
+	int r;
+
 	if (!rdev->ib_pool.ready) {
 		return;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
 	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
 	if (rdev->ib_pool.robj) {
-		radeon_object_kunmap(rdev->ib_pool.robj);
-		radeon_object_unref(&rdev->ib_pool.robj);
+		r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->ib_pool.robj);
+			radeon_bo_unpin(rdev->ib_pool.robj);
+			radeon_bo_unreserve(rdev->ib_pool.robj);
+		}
+		radeon_bo_unref(&rdev->ib_pool.robj);
 		rdev->ib_pool.robj = NULL;
 	}
 	mutex_unlock(&rdev->ib_pool.mutex);
@@ -288,29 +300,28 @@
 	rdev->cp.ring_size = ring_size;
 	/* Allocate ring buffer */
 	if (rdev->cp.ring_obj == NULL) {
-		r = radeon_object_create(rdev, NULL, rdev->cp.ring_size,
-					 true,
-					 RADEON_GEM_DOMAIN_GTT,
-					 false,
-					 &rdev->cp.ring_obj);
+		r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true,
+					RADEON_GEM_DOMAIN_GTT,
+					&rdev->cp.ring_obj);
 		if (r) {
-			DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r);
-			mutex_unlock(&rdev->cp.mutex);
+			dev_err(rdev->dev, "(%d) ring create failed\n", r);
 			return r;
 		}
-		r = radeon_object_pin(rdev->cp.ring_obj,
-				      RADEON_GEM_DOMAIN_GTT,
-				      &rdev->cp.gpu_addr);
+		r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+		if (unlikely(r != 0))
+			return r;
+		r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
+					&rdev->cp.gpu_addr);
 		if (r) {
-			DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r);
-			mutex_unlock(&rdev->cp.mutex);
+			radeon_bo_unreserve(rdev->cp.ring_obj);
+			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
 			return r;
 		}
-		r = radeon_object_kmap(rdev->cp.ring_obj,
+		r = radeon_bo_kmap(rdev->cp.ring_obj,
 				       (void **)&rdev->cp.ring);
+		radeon_bo_unreserve(rdev->cp.ring_obj);
 		if (r) {
-			DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r);
-			mutex_unlock(&rdev->cp.mutex);
+			dev_err(rdev->dev, "(%d) ring map failed\n", r);
 			return r;
 		}
 	}
@@ -321,11 +332,17 @@
 
 void radeon_ring_fini(struct radeon_device *rdev)
 {
+	int r;
+
 	mutex_lock(&rdev->cp.mutex);
 	if (rdev->cp.ring_obj) {
-		radeon_object_kunmap(rdev->cp.ring_obj);
-		radeon_object_unpin(rdev->cp.ring_obj);
-		radeon_object_unref(&rdev->cp.ring_obj);
+		r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->cp.ring_obj);
+			radeon_bo_unpin(rdev->cp.ring_obj);
+			radeon_bo_unreserve(rdev->cp.ring_obj);
+		}
+		radeon_bo_unref(&rdev->cp.ring_obj);
 		rdev->cp.ring = NULL;
 		rdev->cp.ring_obj = NULL;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index f8a465d..391c973 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -30,8 +30,8 @@
 /* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
 void radeon_test_moves(struct radeon_device *rdev)
 {
-	struct radeon_object *vram_obj = NULL;
-	struct radeon_object **gtt_obj = NULL;
+	struct radeon_bo *vram_obj = NULL;
+	struct radeon_bo **gtt_obj = NULL;
 	struct radeon_fence *fence = NULL;
 	uint64_t gtt_addr, vram_addr;
 	unsigned i, n, size;
@@ -52,38 +52,42 @@
 		goto out_cleanup;
 	}
 
-	r = radeon_object_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
-				 false, &vram_obj);
+	r = radeon_bo_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
+				&vram_obj);
 	if (r) {
 		DRM_ERROR("Failed to create VRAM object\n");
 		goto out_cleanup;
 	}
-
-	r = radeon_object_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
+	r = radeon_bo_reserve(vram_obj, false);
+	if (unlikely(r != 0))
+		goto out_cleanup;
+	r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
 	if (r) {
 		DRM_ERROR("Failed to pin VRAM object\n");
 		goto out_cleanup;
 	}
-
 	for (i = 0; i < n; i++) {
 		void *gtt_map, *vram_map;
 		void **gtt_start, **gtt_end;
 		void **vram_start, **vram_end;
 
-		r = radeon_object_create(rdev, NULL, size, true,
-					 RADEON_GEM_DOMAIN_GTT, false, gtt_obj + i);
+		r = radeon_bo_create(rdev, NULL, size, true,
+					 RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
 		if (r) {
 			DRM_ERROR("Failed to create GTT object %d\n", i);
 			goto out_cleanup;
 		}
 
-		r = radeon_object_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
+		r = radeon_bo_reserve(gtt_obj[i], false);
+		if (unlikely(r != 0))
+			goto out_cleanup;
+		r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
 		if (r) {
 			DRM_ERROR("Failed to pin GTT object %d\n", i);
 			goto out_cleanup;
 		}
 
-		r = radeon_object_kmap(gtt_obj[i], &gtt_map);
+		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
 		if (r) {
 			DRM_ERROR("Failed to map GTT object %d\n", i);
 			goto out_cleanup;
@@ -94,7 +98,7 @@
 		     gtt_start++)
 			*gtt_start = gtt_start;
 
-		radeon_object_kunmap(gtt_obj[i]);
+		radeon_bo_kunmap(gtt_obj[i]);
 
 		r = radeon_fence_create(rdev, &fence);
 		if (r) {
@@ -116,7 +120,7 @@
 
 		radeon_fence_unref(&fence);
 
-		r = radeon_object_kmap(vram_obj, &vram_map);
+		r = radeon_bo_kmap(vram_obj, &vram_map);
 		if (r) {
 			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
 			goto out_cleanup;
@@ -131,13 +135,13 @@
 					  "expected 0x%p (GTT map 0x%p-0x%p)\n",
 					  i, *vram_start, gtt_start, gtt_map,
 					  gtt_end);
-				radeon_object_kunmap(vram_obj);
+				radeon_bo_kunmap(vram_obj);
 				goto out_cleanup;
 			}
 			*vram_start = vram_start;
 		}
 
-		radeon_object_kunmap(vram_obj);
+		radeon_bo_kunmap(vram_obj);
 
 		r = radeon_fence_create(rdev, &fence);
 		if (r) {
@@ -159,7 +163,7 @@
 
 		radeon_fence_unref(&fence);
 
-		r = radeon_object_kmap(gtt_obj[i], &gtt_map);
+		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
 		if (r) {
 			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
 			goto out_cleanup;
@@ -174,12 +178,12 @@
 					  "expected 0x%p (VRAM map 0x%p-0x%p)\n",
 					  i, *gtt_start, vram_start, vram_map,
 					  vram_end);
-				radeon_object_kunmap(gtt_obj[i]);
+				radeon_bo_kunmap(gtt_obj[i]);
 				goto out_cleanup;
 			}
 		}
 
-		radeon_object_kunmap(gtt_obj[i]);
+		radeon_bo_kunmap(gtt_obj[i]);
 
 		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
 			 gtt_addr - rdev->mc.gtt_location);
@@ -187,14 +191,20 @@
 
 out_cleanup:
 	if (vram_obj) {
-		radeon_object_unpin(vram_obj);
-		radeon_object_unref(&vram_obj);
+		if (radeon_bo_is_reserved(vram_obj)) {
+			radeon_bo_unpin(vram_obj);
+			radeon_bo_unreserve(vram_obj);
+		}
+		radeon_bo_unref(&vram_obj);
 	}
 	if (gtt_obj) {
 		for (i = 0; i < n; i++) {
 			if (gtt_obj[i]) {
-				radeon_object_unpin(gtt_obj[i]);
-				radeon_object_unref(&gtt_obj[i]);
+				if (radeon_bo_is_reserved(gtt_obj[i])) {
+					radeon_bo_unpin(gtt_obj[i]);
+					radeon_bo_unreserve(gtt_obj[i]);
+				}
+				radeon_bo_unref(&gtt_obj[i]);
 			}
 		}
 		kfree(gtt_obj);
@@ -206,4 +216,3 @@
 		printk(KERN_WARNING "Error while testing BO move.\n");
 	}
 }
-
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index eda4ade..5a19d52 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -150,7 +150,7 @@
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_TT:
-		man->gpu_offset = 0;
+		man->gpu_offset = rdev->mc.gtt_location;
 		man->available_caching = TTM_PL_MASK_CACHING;
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -180,7 +180,7 @@
 		break;
 	case TTM_PL_VRAM:
 		/* "On-card" video ram */
-		man->gpu_offset = 0;
+		man->gpu_offset = rdev->mc.vram_location;
 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
 			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
 			     TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -197,16 +197,19 @@
 	return 0;
 }
 
-static uint32_t radeon_evict_flags(struct ttm_buffer_object *bo)
+static void radeon_evict_flags(struct ttm_buffer_object *bo,
+				struct ttm_placement *placement)
 {
-	uint32_t cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEMTYPE;
-
+	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
 	switch (bo->mem.mem_type) {
+	case TTM_PL_VRAM:
+		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+		break;
+	case TTM_PL_TT:
 	default:
-		return (cur_placement & ~TTM_PL_MASK_CACHING) |
-			TTM_PL_FLAG_SYSTEM |
-			TTM_PL_FLAG_CACHED;
+		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
 	}
+	*placement = rbo->placement;
 }
 
 static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
@@ -283,14 +286,21 @@
 	struct radeon_device *rdev;
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	struct ttm_mem_reg tmp_mem;
-	uint32_t proposed_placement;
+	u32 placements;
+	struct ttm_placement placement;
 	int r;
 
 	rdev = radeon_get_rdev(bo->bdev);
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
-	proposed_placement = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
-	r = ttm_bo_mem_space(bo, proposed_placement, &tmp_mem,
+	placement.fpfn = 0;
+	placement.lpfn = 0;
+	placement.num_placement = 1;
+	placement.placement = &placements;
+	placement.num_busy_placement = 1;
+	placement.busy_placement = &placements;
+	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
 			     interruptible, no_wait);
 	if (unlikely(r)) {
 		return r;
@@ -329,15 +339,21 @@
 	struct radeon_device *rdev;
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	struct ttm_mem_reg tmp_mem;
-	uint32_t proposed_flags;
+	struct ttm_placement placement;
+	u32 placements;
 	int r;
 
 	rdev = radeon_get_rdev(bo->bdev);
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
-	proposed_flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
-	r = ttm_bo_mem_space(bo, proposed_flags, &tmp_mem,
-			     interruptible, no_wait);
+	placement.fpfn = 0;
+	placement.lpfn = 0;
+	placement.num_placement = 1;
+	placement.placement = &placements;
+	placement.num_busy_placement = 1;
+	placement.busy_placement = &placements;
+	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
 	if (unlikely(r)) {
 		return r;
 	}
@@ -407,18 +423,6 @@
 	return r;
 }
 
-const uint32_t radeon_mem_prios[] = {
-	TTM_PL_VRAM,
-	TTM_PL_TT,
-	TTM_PL_SYSTEM,
-};
-
-const uint32_t radeon_busy_prios[] = {
-	TTM_PL_TT,
-	TTM_PL_VRAM,
-	TTM_PL_SYSTEM,
-};
-
 static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
 				bool lazy, bool interruptible)
 {
@@ -446,10 +450,6 @@
 }
 
 static struct ttm_bo_driver radeon_bo_driver = {
-	.mem_type_prio = radeon_mem_prios,
-	.mem_busy_prio = radeon_busy_prios,
-	.num_mem_type_prio = ARRAY_SIZE(radeon_mem_prios),
-	.num_mem_busy_prio = ARRAY_SIZE(radeon_busy_prios),
 	.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
 	.invalidate_caches = &radeon_invalidate_caches,
 	.init_mem_type = &radeon_init_mem_type,
@@ -482,27 +482,31 @@
 		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
 		return r;
 	}
-	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0,
-			   ((rdev->mc.real_vram_size) >> PAGE_SHIFT));
+	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
+				rdev->mc.real_vram_size >> PAGE_SHIFT);
 	if (r) {
 		DRM_ERROR("Failed initializing VRAM heap.\n");
 		return r;
 	}
-	r = radeon_object_create(rdev, NULL, 256 * 1024, true,
-				 RADEON_GEM_DOMAIN_VRAM, false,
-				 &rdev->stollen_vga_memory);
+	r = radeon_bo_create(rdev, NULL, 256 * 1024, true,
+				RADEON_GEM_DOMAIN_VRAM,
+				&rdev->stollen_vga_memory);
 	if (r) {
 		return r;
 	}
-	r = radeon_object_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
+	r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+	if (r)
+		return r;
+	r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
+	radeon_bo_unreserve(rdev->stollen_vga_memory);
 	if (r) {
-		radeon_object_unref(&rdev->stollen_vga_memory);
+		radeon_bo_unref(&rdev->stollen_vga_memory);
 		return r;
 	}
 	DRM_INFO("radeon: %uM of VRAM memory ready\n",
 		 (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
-	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0,
-			   ((rdev->mc.gtt_size) >> PAGE_SHIFT));
+	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
+				rdev->mc.gtt_size >> PAGE_SHIFT);
 	if (r) {
 		DRM_ERROR("Failed initializing GTT heap.\n");
 		return r;
@@ -523,9 +527,15 @@
 
 void radeon_ttm_fini(struct radeon_device *rdev)
 {
+	int r;
+
 	if (rdev->stollen_vga_memory) {
-		radeon_object_unpin(rdev->stollen_vga_memory);
-		radeon_object_unref(&rdev->stollen_vga_memory);
+		r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+		if (r == 0) {
+			radeon_bo_unpin(rdev->stollen_vga_memory);
+			radeon_bo_unreserve(rdev->stollen_vga_memory);
+		}
+		radeon_bo_unref(&rdev->stollen_vga_memory);
 	}
 	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
 	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
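
radeon_ttm_placement_from_domain() is referenced throughout this diff but
defined in a radeon_object.c hunk not shown here. For orientation, a sketch
of the shape such a helper presumably has (assumed, not copied from the
patch): it turns a RADEON_GEM_DOMAIN_* mask into the bo->placements[] array
that ttm_bo_validate() and radeon_evict_flags() hand to TTM:

	void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
	{
		u32 c = 0;

		rbo->placement.fpfn = 0;
		rbo->placement.lpfn = 0;
		rbo->placement.placement = rbo->placements;
		rbo->placement.busy_placement = rbo->placements;
		if (domain & RADEON_GEM_DOMAIN_VRAM)
			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_VRAM;
		if (domain & RADEON_GEM_DOMAIN_GTT)
			rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
		if (domain & RADEON_GEM_DOMAIN_CPU)
			rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
		if (!c)	/* fall back to system memory */
			rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
		rbo->placement.num_placement = c;
		rbo->placement.num_busy_placement = c;
	}
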
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index ca03716..c1fcddd 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -352,7 +352,7 @@
 	u32 tmp;
 
 	/* Setup GPU memory space */
-	tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM));
+	tmp = RREG32(R_00015C_NB_TOM);
 	rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
 	rdev->mc.gtt_location = 0xFFFFFFFFUL;
 	r = radeon_mc_setup(rdev);
@@ -387,13 +387,13 @@
 	r300_clock_startup(rdev);
 	/* Initialize GPU configuration (# pipes, ...) */
 	rs400_gpu_init(rdev);
+	r100_enable_bm(rdev);
 	/* Initialize GART (initialize after TTM so we can allocate
 	 * memory through TTM but finalize after TTM) */
 	r = rs400_gart_enable(rdev);
 	if (r)
 		return r;
 	/* Enable IRQ */
-	rdev->irq.sw_int = true;
 	r100_irq_set(rdev);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
@@ -430,6 +430,8 @@
 	radeon_combios_asic_init(rdev->ddev);
 	/* Resume clock after posting */
 	r300_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return rs400_startup(rdev);
 }
 
@@ -452,7 +454,7 @@
 	rs400_gart_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
@@ -490,10 +492,9 @@
 			RREG32(R_0007C0_CP_STAT));
 	}
 	/* check if cards are posted or not */
-	if (!radeon_card_posted(rdev) && rdev->bios) {
-		DRM_INFO("GPU not posted. posting now...\n");
-		radeon_combios_asic_init(rdev->ddev);
-	}
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
 	/* Get vram informations */
@@ -510,7 +511,7 @@
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 	r = rs400_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5f117cd..4f8ea42 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -45,6 +45,122 @@
 void rs600_gpu_init(struct radeon_device *rdev);
 int rs600_mc_wait_for_idle(struct radeon_device *rdev);
 
+int rs600_mc_init(struct radeon_device *rdev)
+{
+	/* read back the MC value from the hw */
+	int r;
+	u32 tmp;
+
+	/* Setup GPU memory space */
+	tmp = RREG32_MC(R_000004_MC_FB_LOCATION);
+	rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16;
+	rdev->mc.gtt_location = 0xffffffffUL;
+	r = radeon_mc_setup(rdev);
+	if (r)
+		return r;
+	return 0;
+}
+
+/* hpd for digital panel detect/disconnect */
+bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+	u32 tmp;
+	bool connected = false;
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
+		if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
+			connected = true;
+		break;
+	case RADEON_HPD_2:
+		tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
+		if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
+			connected = true;
+		break;
+	default:
+		break;
+	}
+	return connected;
+}
+
+void rs600_hpd_set_polarity(struct radeon_device *rdev,
+			    enum radeon_hpd_id hpd)
+{
+	u32 tmp;
+	bool connected = rs600_hpd_sense(rdev, hpd);
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
+		if (connected)
+			tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
+		else
+			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
+		WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+		break;
+	case RADEON_HPD_2:
+		tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
+		if (connected)
+			tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
+		else
+			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
+		WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+		break;
+	default:
+		break;
+	}
+}
+
+void rs600_hpd_init(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		switch (radeon_connector->hpd.hpd) {
+		case RADEON_HPD_1:
+			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
+			       S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
+			rdev->irq.hpd[0] = true;
+			break;
+		case RADEON_HPD_2:
+			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
+			       S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
+			rdev->irq.hpd[1] = true;
+			break;
+		default:
+			break;
+		}
+	}
+	rs600_irq_set(rdev);
+}
+
+void rs600_hpd_fini(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		switch (radeon_connector->hpd.hpd) {
+		case RADEON_HPD_1:
+			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
+			       S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
+			rdev->irq.hpd[0] = false;
+			break;
+		case RADEON_HPD_2:
+			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
+			       S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
+			rdev->irq.hpd[1] = false;
+			break;
+		default:
+			break;
+		}
+	}
+}
+
 /*
  * GART.
  */
@@ -100,40 +216,40 @@
 	WREG32(R_00004C_BUS_CNTL, tmp);
 	/* FIXME: setup default page */
 	WREG32_MC(R_000100_MC_PT0_CNTL,
-		 (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
-		  S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
+		  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
+		   S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
+
 	for (i = 0; i < 19; i++) {
 		WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
-			S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
-			S_00016C_SYSTEM_ACCESS_MODE_MASK(
-				V_00016C_SYSTEM_ACCESS_MODE_IN_SYS) |
-			S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
-				V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE) |
-			S_00016C_EFFECTIVE_L1_CACHE_SIZE(1) |
-			S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
-			S_00016C_EFFECTIVE_L1_QUEUE_SIZE(1));
+			  S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
+			  S_00016C_SYSTEM_ACCESS_MODE_MASK(
+				  V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
+			  S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
+				  V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
+			  S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
+			  S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
+			  S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
 	}
-
-	/* System context map to GART space */
-	WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_start);
-	WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.gtt_end);
-
 	/* enable first context */
-	WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
-	WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
 	WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
-			S_000102_ENABLE_PAGE_TABLE(1) |
-			S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
+		  S_000102_ENABLE_PAGE_TABLE(1) |
+		  S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
+
 	/* disable all other contexts */
-	for (i = 1; i < 8; i++) {
+	for (i = 1; i < 8; i++)
 		WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);
-	}
 
 	/* setup the page table */
 	WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
-			rdev->gart.table_addr);
+		  rdev->gart.table_addr);
+	WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
+	WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
 	WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
 
+	/* System context maps to VRAM space */
+	WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
+	WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);
+
 	/* enable page tables */
 	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
 	WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
@@ -146,15 +262,20 @@
 
 void rs600_gart_disable(struct radeon_device *rdev)
 {
-	uint32_t tmp;
+	u32 tmp;
+	int r;
 
 	/* FIXME: disable out of gart access */
 	WREG32_MC(R_000100_MC_PT0_CNTL, 0);
 	tmp = RREG32_MC(R_000009_MC_CNTL1);
 	WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
 	if (rdev->gart.table.vram.robj) {
-		radeon_object_kunmap(rdev->gart.table.vram.robj);
-		radeon_object_unpin(rdev->gart.table.vram.robj);
+		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+		if (r == 0) {
+			radeon_bo_kunmap(rdev->gart.table.vram.robj);
+			radeon_bo_unpin(rdev->gart.table.vram.robj);
+			radeon_bo_unreserve(rdev->gart.table.vram.robj);
+		}
 	}
 }
 
@@ -189,6 +310,10 @@
 {
 	uint32_t tmp = 0;
 	uint32_t mode_int = 0;
+	u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
+		~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
+	u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
+		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
 
 	if (rdev->irq.sw_int) {
 		tmp |= S_000040_SW_INT_EN(1);
@@ -199,8 +324,16 @@
 	if (rdev->irq.crtc_vblank_int[1]) {
 		mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
 	}
+	if (rdev->irq.hpd[0]) {
+		hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
+	}
+	if (rdev->irq.hpd[1]) {
+		hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
+	}
 	WREG32(R_000040_GEN_INT_CNTL, tmp);
 	WREG32(R_006540_DxMODE_INT_MASK, mode_int);
+	WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
+	WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
 	return 0;
 }
 
@@ -208,6 +341,7 @@
 {
 	uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
 	uint32_t irq_mask = ~C_000044_SW_INT;
+	u32 tmp;
 
 	if (G_000044_DISPLAY_INT_STAT(irqs)) {
 		*r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
@@ -219,6 +353,16 @@
 			WREG32(R_006D34_D2MODE_VBLANK_STATUS,
 				S_006D34_D2MODE_VBLANK_ACK(1));
 		}
+		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) {
+			tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
+			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
+			WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+		}
+		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) {
+			tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
+			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
+			WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+		}
 	} else {
 		*r500_disp_int = 0;
 	}
@@ -244,6 +388,7 @@
 {
 	uint32_t status, msi_rearm;
 	uint32_t r500_disp_int;
+	bool queue_hotplug = false;
 
 	status = rs600_irq_ack(rdev, &r500_disp_int);
 	if (!status && !r500_disp_int) {
@@ -258,8 +403,18 @@
 			drm_handle_vblank(rdev->ddev, 0);
 		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int))
 			drm_handle_vblank(rdev->ddev, 1);
+		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
+			queue_hotplug = true;
+			DRM_DEBUG("HPD1\n");
+		}
+		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(r500_disp_int)) {
+			queue_hotplug = true;
+			DRM_DEBUG("HPD2\n");
+		}
 		status = rs600_irq_ack(rdev, &r500_disp_int);
 	}
+	if (queue_hotplug)
+		queue_work(rdev->wq, &rdev->hotplug_work);
 	if (rdev->msi_enabled) {
 		switch (rdev->family) {
 		case CHIP_RS600:
@@ -301,9 +456,7 @@
 
 void rs600_gpu_init(struct radeon_device *rdev)
 {
-	/* FIXME: HDP same place on rs600 ? */
 	r100_hdp_reset(rdev);
-	/* FIXME: is this correct ? */
 	r420_pipes_init(rdev);
 	/* Wait for mc idle */
 	if (rs600_mc_wait_for_idle(rdev))
@@ -312,9 +465,20 @@
 
 void rs600_vram_info(struct radeon_device *rdev)
 {
-	/* FIXME: to do or is these values sane ? */
 	rdev->mc.vram_is_ddr = true;
 	rdev->mc.vram_width = 128;
+
+	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+
+	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+
+	if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+		rdev->mc.mc_vram_size = rdev->mc.aper_size;
+
+	if (rdev->mc.real_vram_size > rdev->mc.aper_size)
+		rdev->mc.real_vram_size = rdev->mc.aper_size;
 }
 
 void rs600_bandwidth_update(struct radeon_device *rdev)
@@ -388,7 +552,6 @@
 	if (r)
 		return r;
 	/* Enable IRQ */
-	rdev->irq.sw_int = true;
 	rs600_irq_set(rdev);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
@@ -423,6 +586,8 @@
 	atom_asic_init(rdev->mode_info.atom_context);
 	/* Resume clock after posting */
 	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return rs600_startup(rdev);
 }
 
@@ -445,7 +610,7 @@
 	rs600_gart_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
@@ -482,10 +647,9 @@
 			RREG32(R_0007C0_CP_STAT));
 	}
 	/* check if cards are posted or not */
-	if (!radeon_card_posted(rdev) && rdev->bios) {
-		DRM_INFO("GPU not posted. posting now...\n");
-		atom_asic_init(rdev->mode_info.atom_context);
-	}
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
 	/* Initialize power management */
@@ -493,7 +657,7 @@
 	/* Get vram informations */
 	rs600_vram_info(rdev);
 	/* Initialize memory controller (also test AGP) */
-	r = r420_mc_init(rdev);
+	r = rs600_mc_init(rdev);
 	if (r)
 		return r;
 	rs600_debugfs(rdev);
@@ -505,7 +669,7 @@
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 	r = rs600_gart_init(rdev);
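
The rs600 hunks above wire hot-plug detect into the IRQ path: rs600_hpd_init
enables the detect blocks, rs600_irq_set programs the per-pin enable bits,
rs600_irq_ack acks the status, and the handler queues hotplug_work. A hedged
sketch of how a connector ->detect() path might consume the two helpers
(illustrative only, not part of this patch):

	static enum drm_connector_status
	example_hpd_detect(struct radeon_device *rdev, enum radeon_hpd_id hpd)
	{
		bool sense = rs600_hpd_sense(rdev, hpd);

		/* re-arm against the current sense so the next edge
		 * (plug or unplug) raises another interrupt */
		rs600_hpd_set_polarity(rdev, hpd);
		return sense ? connector_status_connected
			     : connector_status_disconnected;
	}
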
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
index 8130892..c1c8f58 100644
--- a/drivers/gpu/drm/radeon/rs600d.h
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -30,27 +30,12 @@
 
 /* Registers */
 #define R_000040_GEN_INT_CNTL                        0x000040
-#define   S_000040_DISPLAY_INT_STATUS(x)               (((x) & 0x1) << 0)
-#define   G_000040_DISPLAY_INT_STATUS(x)               (((x) >> 0) & 0x1)
-#define   C_000040_DISPLAY_INT_STATUS                  0xFFFFFFFE
-#define   S_000040_DMA_VIPH0_INT_EN(x)                 (((x) & 0x1) << 12)
-#define   G_000040_DMA_VIPH0_INT_EN(x)                 (((x) >> 12) & 0x1)
-#define   C_000040_DMA_VIPH0_INT_EN                    0xFFFFEFFF
-#define   S_000040_CRTC2_VSYNC(x)                      (((x) & 0x1) << 6)
-#define   G_000040_CRTC2_VSYNC(x)                      (((x) >> 6) & 0x1)
-#define   C_000040_CRTC2_VSYNC                         0xFFFFFFBF
-#define   S_000040_SNAPSHOT2(x)                        (((x) & 0x1) << 7)
-#define   G_000040_SNAPSHOT2(x)                        (((x) >> 7) & 0x1)
-#define   C_000040_SNAPSHOT2                           0xFFFFFF7F
-#define   S_000040_CRTC2_VBLANK(x)                     (((x) & 0x1) << 9)
-#define   G_000040_CRTC2_VBLANK(x)                     (((x) >> 9) & 0x1)
-#define   C_000040_CRTC2_VBLANK                        0xFFFFFDFF
-#define   S_000040_FP2_DETECT(x)                       (((x) & 0x1) << 10)
-#define   G_000040_FP2_DETECT(x)                       (((x) >> 10) & 0x1)
-#define   C_000040_FP2_DETECT                          0xFFFFFBFF
-#define   S_000040_VSYNC_DIFF_OVER_LIMIT(x)            (((x) & 0x1) << 11)
-#define   G_000040_VSYNC_DIFF_OVER_LIMIT(x)            (((x) >> 11) & 0x1)
-#define   C_000040_VSYNC_DIFF_OVER_LIMIT               0xFFFFF7FF
+#define   S_000040_SCRATCH_INT_MASK(x)                 (((x) & 0x1) << 18)
+#define   G_000040_SCRATCH_INT_MASK(x)                 (((x) >> 18) & 0x1)
+#define   C_000040_SCRATCH_INT_MASK                    0xFFFBFFFF
+#define   S_000040_GUI_IDLE_MASK(x)                    (((x) & 0x1) << 19)
+#define   G_000040_GUI_IDLE_MASK(x)                    (((x) >> 19) & 0x1)
+#define   C_000040_GUI_IDLE_MASK                       0xFFF7FFFF
 #define   S_000040_DMA_VIPH1_INT_EN(x)                 (((x) & 0x1) << 13)
 #define   G_000040_DMA_VIPH1_INT_EN(x)                 (((x) >> 13) & 0x1)
 #define   C_000040_DMA_VIPH1_INT_EN                    0xFFFFDFFF
@@ -370,7 +355,90 @@
 #define   S_007EDC_LB_D2_VBLANK_INTERRUPT(x)           (((x) & 0x1) << 5)
 #define   G_007EDC_LB_D2_VBLANK_INTERRUPT(x)           (((x) >> 5) & 0x1)
 #define   C_007EDC_LB_D2_VBLANK_INTERRUPT              0xFFFFFFDF
-
+#define   S_007EDC_DACA_AUTODETECT_INTERRUPT(x)        (((x) & 0x1) << 16)
+#define   G_007EDC_DACA_AUTODETECT_INTERRUPT(x)        (((x) >> 16) & 0x1)
+#define   C_007EDC_DACA_AUTODETECT_INTERRUPT           0xFFFEFFFF
+#define   S_007EDC_DACB_AUTODETECT_INTERRUPT(x)        (((x) & 0x1) << 17)
+#define   G_007EDC_DACB_AUTODETECT_INTERRUPT(x)        (((x) >> 17) & 0x1)
+#define   C_007EDC_DACB_AUTODETECT_INTERRUPT           0xFFFDFFFF
+#define   S_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x)    (((x) & 0x1) << 18)
+#define   G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x)    (((x) >> 18) & 0x1)
+#define   C_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT       0xFFFBFFFF
+#define   S_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x)    (((x) & 0x1) << 19)
+#define   G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x)    (((x) >> 19) & 0x1)
+#define   C_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT       0xFFF7FFFF
+#define R_007828_DACA_AUTODETECT_CONTROL               0x007828
+#define   S_007828_DACA_AUTODETECT_MODE(x)             (((x) & 0x3) << 0)
+#define   G_007828_DACA_AUTODETECT_MODE(x)             (((x) >> 0) & 0x3)
+#define   C_007828_DACA_AUTODETECT_MODE                0xFFFFFFFC
+#define   S_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8)
+#define   G_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff)
+#define   C_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER  0xFFFF00FF
+#define   S_007828_DACA_AUTODETECT_CHECK_MASK(x)       (((x) & 0x3) << 16)
+#define   G_007828_DACA_AUTODETECT_CHECK_MASK(x)       (((x) >> 16) & 0x3)
+#define   C_007828_DACA_AUTODETECT_CHECK_MASK          0xFFFCFFFF
+#define R_007838_DACA_AUTODETECT_INT_CONTROL           0x007838
+#define   S_007838_DACA_AUTODETECT_ACK(x)              (((x) & 0x1) << 0)
+#define   C_007838_DACA_DACA_AUTODETECT_ACK            0xFFFFFFFE
+#define   S_007838_DACA_AUTODETECT_INT_ENABLE(x)       (((x) & 0x1) << 16)
+#define   G_007838_DACA_AUTODETECT_INT_ENABLE(x)       (((x) >> 16) & 0x1)
+#define   C_007838_DACA_AUTODETECT_INT_ENABLE          0xFFFCFFFF
+#define R_007A28_DACB_AUTODETECT_CONTROL               0x007A28
+#define   S_007A28_DACB_AUTODETECT_MODE(x)             (((x) & 0x3) << 0)
+#define   G_007A28_DACB_AUTODETECT_MODE(x)             (((x) >> 0) & 0x3)
+#define   C_007A28_DACB_AUTODETECT_MODE                0xFFFFFFFC
+#define   S_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8)
+#define   G_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff)
+#define   C_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER  0xFFFF00FF
+#define   S_007A28_DACB_AUTODETECT_CHECK_MASK(x)       (((x) & 0x3) << 16)
+#define   G_007A28_DACB_AUTODETECT_CHECK_MASK(x)       (((x) >> 16) & 0x3)
+#define   C_007A28_DACB_AUTODETECT_CHECK_MASK          0xFFFCFFFF
+#define R_007A38_DACB_AUTODETECT_INT_CONTROL           0x007A38
+#define   S_007A38_DACB_AUTODETECT_ACK(x)              (((x) & 0x1) << 0)
+#define   C_007A38_DACB_DACA_AUTODETECT_ACK            0xFFFFFFFE
+#define   S_007A38_DACB_AUTODETECT_INT_ENABLE(x)       (((x) & 0x1) << 16)
+#define   G_007A38_DACB_AUTODETECT_INT_ENABLE(x)       (((x) >> 16) & 0x1)
+#define   C_007A38_DACB_AUTODETECT_INT_ENABLE          0xFFFCFFFF
+#define R_007D00_DC_HOT_PLUG_DETECT1_CONTROL           0x007D00
+#define   S_007D00_DC_HOT_PLUG_DETECT1_EN(x)           (((x) & 0x1) << 0)
+#define   G_007D00_DC_HOT_PLUG_DETECT1_EN(x)           (((x) >> 0) & 0x1)
+#define   C_007D00_DC_HOT_PLUG_DETECT1_EN              0xFFFFFFFE
+#define R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS        0x007D04
+#define   S_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x)   (((x) & 0x1) << 0)
+#define   G_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x)   (((x) >> 0) & 0x1)
+#define   C_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS      0xFFFFFFFE
+#define   S_007D04_DC_HOT_PLUG_DETECT1_SENSE(x)        (((x) & 0x1) << 1)
+#define   G_007D04_DC_HOT_PLUG_DETECT1_SENSE(x)        (((x) >> 1) & 0x1)
+#define   C_007D04_DC_HOT_PLUG_DETECT1_SENSE           0xFFFFFFFD
+#define R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL       0x007D08
+#define   S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(x)      (((x) & 0x1) << 0)
+#define   C_007D08_DC_HOT_PLUG_DETECT1_INT_ACK         0xFFFFFFFE
+#define   S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) & 0x1) << 8)
+#define   G_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) >> 8) & 0x1)
+#define   C_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY    0xFFFFFEFF
+#define   S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x)       (((x) & 0x1) << 16)
+#define   G_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x)       (((x) >> 16) & 0x1)
+#define   C_007D08_DC_HOT_PLUG_DETECT1_INT_EN          0xFFFEFFFF
+#define R_007D10_DC_HOT_PLUG_DETECT2_CONTROL           0x007D10
+#define   S_007D10_DC_HOT_PLUG_DETECT2_EN(x)           (((x) & 0x1) << 0)
+#define   G_007D10_DC_HOT_PLUG_DETECT2_EN(x)           (((x) >> 0) & 0x1)
+#define   C_007D10_DC_HOT_PLUG_DETECT2_EN              0xFFFFFFFE
+#define R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS        0x007D14
+#define   S_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x)   (((x) & 0x1) << 0)
+#define   G_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x)   (((x) >> 0) & 0x1)
+#define   C_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS      0xFFFFFFFE
+#define   S_007D14_DC_HOT_PLUG_DETECT2_SENSE(x)        (((x) & 0x1) << 1)
+#define   G_007D14_DC_HOT_PLUG_DETECT2_SENSE(x)        (((x) >> 1) & 0x1)
+#define   C_007D14_DC_HOT_PLUG_DETECT2_SENSE           0xFFFFFFFD
+#define R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL       0x007D18
+#define   S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(x)      (((x) & 0x1) << 0)
+#define   C_007D18_DC_HOT_PLUG_DETECT2_INT_ACK         0xFFFFFFFE
+#define   S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) & 0x1) << 8)
+#define   G_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) >> 8) & 0x1)
+#define   C_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY    0xFFFFFEFF
+#define   S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x)       (((x) & 0x1) << 16)
+#define   G_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x)       (((x) >> 16) & 0x1)
+#define   C_007D18_DC_HOT_PLUG_DETECT2_INT_EN          0xFFFEFFFF
 
 /* MC registers */
 #define R_000000_MC_STATUS                           0x000000
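
The S_/G_/C_ triplets in this header follow the usual auto-generated register
convention: S_FIELD(x) shifts a value into the field, G_FIELD(x) extracts the
field from a register word, and C_FIELD is the AND-mask that clears the field
for a read-modify-write. A minimal sketch of the idiom, using the DACA
autodetect ack field defined above (RREG32/WREG32 are the driver's standard
MMIO accessors; the real IRQ handler's ack sequence may differ):

	u32 tmp;

	tmp = RREG32(R_007838_DACA_AUTODETECT_INT_CONTROL);
	tmp &= C_007838_DACA_DACA_AUTODETECT_ACK;	/* clear the ack field */
	tmp |= S_007838_DACA_AUTODETECT_ACK(1);		/* write 1 to ack */
	WREG32(R_007838_DACA_AUTODETECT_INT_CONTROL, tmp);
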
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 2754717..1e22f52 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -131,24 +131,25 @@
 
 void rs690_vram_info(struct radeon_device *rdev)
 {
-	uint32_t tmp;
 	fixed20_12 a;
 
 	rs400_gart_adjust_size(rdev);
-	/* DDR for all card after R300 & IGP */
+
 	rdev->mc.vram_is_ddr = true;
-	/* FIXME: is this correct for RS690/RS740 ? */
-	tmp = RREG32(RADEON_MEM_CNTL);
-	if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
-		rdev->mc.vram_width = 128;
-	} else {
-		rdev->mc.vram_width = 64;
-	}
+	rdev->mc.vram_width = 128;
+
 	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
 	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
 
 	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
 	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+
+	if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+		rdev->mc.mc_vram_size = rdev->mc.aper_size;
+
+	if (rdev->mc.real_vram_size > rdev->mc.aper_size)
+		rdev->mc.real_vram_size = rdev->mc.aper_size;
+
 	rs690_pm_info(rdev);
 	/* FIXME: we should enforce default clock in case GPU is not in
 	 * default setup
@@ -161,6 +162,21 @@
 	rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
 }
 
+static int rs690_mc_init(struct radeon_device *rdev)
+{
+	int r;
+	u32 tmp;
+
+	/* Setup GPU memory space */
+	tmp = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
+	rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16;
+	rdev->mc.gtt_location = 0xFFFFFFFFUL;
+	r = radeon_mc_setup(rdev);
+	if (r)
+		return r;
+	return 0;
+}
+
 void rs690_line_buffer_adjust(struct radeon_device *rdev,
 			      struct drm_display_mode *mode1,
 			      struct drm_display_mode *mode2)
@@ -244,8 +260,9 @@
 
 	b.full = rfixed_const(mode->crtc_hdisplay);
 	c.full = rfixed_const(256);
-	a.full = rfixed_mul(wm->num_line_pair, b);
-	request_fifo_depth.full = rfixed_div(a, c);
+	a.full = rfixed_div(b, c);
+	request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
+	request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
 	if (a.full < rfixed_const(4)) {
 		wm->lb_request_fifo_depth = 4;
 	} else {
@@ -374,6 +391,7 @@
 	a.full = rfixed_const(16);
 	wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
 	wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
+	wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
 
 	/* Determine estimated width */
 	estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
@@ -383,6 +401,7 @@
 	} else {
 		a.full = rfixed_const(16);
 		wm->priority_mark.full = rfixed_div(estimated_width, a);
+		wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
 		wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
 	}
 }
@@ -605,7 +624,6 @@
 	if (r)
 		return r;
 	/* Enable IRQ */
-	rdev->irq.sw_int = true;
 	rs600_irq_set(rdev);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
@@ -640,6 +658,8 @@
 	atom_asic_init(rdev->mode_info.atom_context);
 	/* Resume clock after posting */
 	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return rs690_startup(rdev);
 }
 
@@ -662,7 +682,7 @@
 	rs400_gart_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
@@ -700,10 +720,9 @@
 			RREG32(R_0007C0_CP_STAT));
 	}
 	/* check if cards are posted or not */
-	if (!radeon_card_posted(rdev) && rdev->bios) {
-		DRM_INFO("GPU not posted. posting now...\n");
-		atom_asic_init(rdev->mode_info.atom_context);
-	}
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
 	/* Initialize power management */
@@ -711,7 +730,7 @@
 	/* Get vram informations */
 	rs690_vram_info(rdev);
 	/* Initialize memory controller (also test AGP) */
-	r = r420_mc_init(rdev);
+	r = rs690_mc_init(rdev);
 	if (r)
 		return r;
 	rv515_debugfs(rdev);
@@ -723,7 +742,7 @@
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 	r = rs400_gart_init(rdev);
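
The watermark rework above reorders the fixed20_12 arithmetic: instead of
multiplying crtc_hdisplay by num_line_pair and then dividing by 256 (which
risks overflowing the 20.12 intermediate), it divides first, multiplies, and
rounds up with rfixed_ceil(). A sketch of the resulting computation, with a
worked value (names as in the hunk above):

	fixed20_12 a, b, c, request_fifo_depth;

	b.full = rfixed_const(mode->crtc_hdisplay);	/* e.g. 1280 */
	c.full = rfixed_const(256);
	a.full = rfixed_div(b, c);			/* 1280/256 = 5 */
	request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
	request_fifo_depth.full = rfixed_ceil(request_fifo_depth);

For a 1280-pixel mode with one line pair this yields a depth of 5; any
fractional result is now rounded up instead of truncated.
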
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index ba68c9f..59632a5 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -478,7 +478,6 @@
 			return r;
 	}
 	/* Enable IRQ */
-	rdev->irq.sw_int = true;
 	rs600_irq_set(rdev);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
@@ -514,6 +513,8 @@
 	atom_asic_init(rdev->mode_info.atom_context);
 	/* Resume clock after posting */
 	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return rv515_startup(rdev);
 }
 
@@ -540,11 +541,11 @@
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
 	radeon_gem_fini(rdev);
-    rv370_pcie_gart_fini(rdev);
+	rv370_pcie_gart_fini(rdev);
 	radeon_agp_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
@@ -580,10 +581,8 @@
 			RREG32(R_0007C0_CP_STAT));
 	}
 	/* check if cards are posted or not */
-	if (!radeon_card_posted(rdev) && rdev->bios) {
-		DRM_INFO("GPU not posted. posting now...\n");
-		atom_asic_init(rdev->mode_info.atom_context);
-	}
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
 	/* Initialize power management */
@@ -603,7 +602,7 @@
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 	r = rv370_pcie_gart_init(rdev);
@@ -892,8 +891,9 @@
 
 	b.full = rfixed_const(mode->crtc_hdisplay);
 	c.full = rfixed_const(256);
-	a.full = rfixed_mul(wm->num_line_pair, b);
-	request_fifo_depth.full = rfixed_div(a, c);
+	a.full = rfixed_div(b, c);
+	request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
+	request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
 	if (a.full < rfixed_const(4)) {
 		wm->lb_request_fifo_depth = 4;
 	} else {
@@ -995,15 +995,17 @@
 	a.full = rfixed_const(16);
 	wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
 	wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
+	wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
 
 	/* Determine estimated width */
 	estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
 	estimated_width.full = rfixed_div(estimated_width, consumption_time);
 	if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
-		wm->priority_mark.full = rfixed_const(10);
+		wm->priority_mark.full = wm->priority_mark_max.full;
 	} else {
 		a.full = rfixed_const(16);
 		wm->priority_mark.full = rfixed_div(estimated_width, a);
+		wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
 		wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
 	}
 }
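
The rv515 watermark change also replaces the arbitrary fallback priority mark
of 10 with priority_mark_max itself. Since priority_mark_max is now
ceil(crtc_hdisplay / 16), a 1024-pixel-wide mode gets a maximum mark of
1024/16 = 64; when the estimated width exceeds the active width, the mark is
pinned at that maximum rather than at the old constant, which under-weighted
wide modes.
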
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 5e06ee7..fbb0357 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -92,7 +92,7 @@
 void rv770_pcie_gart_disable(struct radeon_device *rdev)
 {
 	u32 tmp;
-	int i;
+	int i, r;
 
 	/* Disable all tables */
 	for (i = 0; i < 7; i++)
@@ -113,8 +113,12 @@
 	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
 	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
 	if (rdev->gart.table.vram.robj) {
-		radeon_object_kunmap(rdev->gart.table.vram.robj);
-		radeon_object_unpin(rdev->gart.table.vram.robj);
+		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->gart.table.vram.robj);
+			radeon_bo_unpin(rdev->gart.table.vram.robj);
+			radeon_bo_unreserve(rdev->gart.table.vram.robj);
+		}
 	}
 }
 
@@ -870,6 +874,14 @@
 {
 	int r;
 
+	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+		r = r600_init_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load firmware!\n");
+			return r;
+		}
+	}
+
 	rv770_mc_program(rdev);
 	if (rdev->flags & RADEON_IS_AGP) {
 		rv770_agp_enable(rdev);
@@ -880,13 +892,26 @@
 	}
 	rv770_gpu_init(rdev);
 
-	r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
-			      &rdev->r600_blit.shader_gpu_addr);
+	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+			&rdev->r600_blit.shader_gpu_addr);
+	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 	if (r) {
 		DRM_ERROR("failed to pin blit object %d\n", r);
 		return r;
 	}
 
+	/* Enable IRQ */
+	r = r600_irq_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: IH init failed (%d).\n", r);
+		radeon_irq_kms_fini(rdev);
+		return r;
+	}
+	r600_irq_set(rdev);
+
 	r = radeon_ring_init(rdev, rdev->cp.ring_size);
 	if (r)
 		return r;
@@ -934,13 +959,19 @@
 
 int rv770_suspend(struct radeon_device *rdev)
 {
+	int r;
+
 	/* FIXME: we should wait for ring to be empty */
 	r700_cp_stop(rdev);
 	rdev->cp.ready = false;
 	r600_wb_disable(rdev);
 	rv770_pcie_gart_disable(rdev);
 	/* unpin shaders bo */
-        radeon_object_unpin(rdev->r600_blit.shader_obj);
+	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+	if (likely(r == 0)) {
+		radeon_bo_unpin(rdev->r600_blit.shader_obj);
+		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+	}
 	return 0;
 }
 
@@ -975,7 +1006,11 @@
 	if (r)
 		return r;
 	/* Post card if necessary */
-	if (!r600_card_posted(rdev) && rdev->bios) {
+	if (!r600_card_posted(rdev)) {
+		if (!rdev->bios) {
+			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+			return -EINVAL;
+		}
 		DRM_INFO("GPU not posted. posting now...\n");
 		atom_asic_init(rdev->mode_info.atom_context);
 	}
@@ -998,31 +1033,31 @@
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
+
+	r = radeon_irq_kms_init(rdev);
+	if (r)
+		return r;
+
 	rdev->cp.ring_obj = NULL;
 	r600_ring_init(rdev, 1024 * 1024);
 
-	if (!rdev->me_fw || !rdev->pfp_fw) {
-		r = r600_cp_init_microcode(rdev);
-		if (r) {
-			DRM_ERROR("Failed to load firmware!\n");
-			return r;
-		}
-	}
+	rdev->ih.ring_obj = NULL;
+	r600_ih_ring_init(rdev, 64 * 1024);
 
 	r = r600_pcie_gart_init(rdev);
 	if (r)
 		return r;
 
-	rdev->accel_working = true;
 	r = r600_blit_init(rdev);
 	if (r) {
-		DRM_ERROR("radeon: failled blitter (%d).\n", r);
-		rdev->accel_working = false;
+		DRM_ERROR("radeon: failed blitter (%d).\n", r);
+		return r;
 	}
 
+	rdev->accel_working = true;
 	r = rv770_startup(rdev);
 	if (r) {
 		rv770_suspend(rdev);
@@ -1034,12 +1069,12 @@
 	if (rdev->accel_working) {
 		r = radeon_ib_pool_init(rdev);
 		if (r) {
-			DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
+			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
 			rdev->accel_working = false;
 		}
 		r = r600_ib_test(rdev);
 		if (r) {
-			DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
 			rdev->accel_working = false;
 		}
 	}
@@ -1051,6 +1086,8 @@
 	rv770_suspend(rdev);
 
 	r600_blit_fini(rdev);
+	r600_irq_fini(rdev);
+	radeon_irq_kms_fini(rdev);
 	radeon_ring_fini(rdev);
 	r600_wb_fini(rdev);
 	rv770_pcie_gart_fini(rdev);
@@ -1059,7 +1096,7 @@
 	radeon_clocks_fini(rdev);
 	if (rdev->flags & RADEON_IS_AGP)
 		radeon_agp_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
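
A recurring pattern in these hunks is that the radeon_object_* calls have
become radeon_bo_* calls with explicit reservation: a buffer must be reserved
before it is pinned, unpinned, or unmapped, and unreserved afterwards. A
hedged sketch of the bracket, modelled on rv770_pcie_gart_disable() above
(example_unpin is illustrative, not a function in this patch):

	static int example_unpin(struct radeon_bo *robj)
	{
		int r;

		r = radeon_bo_reserve(robj, false);
		if (unlikely(r != 0))
			return r;	/* reservation failed or interrupted */
		radeon_bo_kunmap(robj);
		radeon_bo_unpin(robj);
		radeon_bo_unreserve(robj);
		return 0;
	}
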
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b0a9de7..1e138f5 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -3,6 +3,7 @@
 
 ccflags-y := -Iinclude/drm
 ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
-	ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o
+	ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
+	ttm_object.o ttm_lock.o ttm_execbuf_util.o
 
 obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 87c0625..1fbb2ee 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -27,6 +27,14 @@
 /*
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
+/* Notes:
+ *
+ * We store a bo pointer in the drm_mm_node struct so we know which bo
+ * owns a specific node. There is no protection on the pointer, so to
+ * keep things from going berserk, only access it while holding the
+ * global lru lock, and reset it to NULL whenever you free a node.
+ */
 
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
@@ -51,6 +59,60 @@
 	.mode = S_IRUGO
 };
 
+static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
+{
+	int i;
+
+	for (i = 0; i <= TTM_PL_PRIV5; i++)
+		if (flags & (1 << i)) {
+			*mem_type = i;
+			return 0;
+		}
+	return -EINVAL;
+}
+
+static void ttm_mem_type_manager_debug(struct ttm_bo_global *glob,
+					struct ttm_mem_type_manager *man)
+{
+	printk(KERN_ERR TTM_PFX "    has_type: %d\n", man->has_type);
+	printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
+	printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
+	printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
+	printk(KERN_ERR TTM_PFX "    io_offset: 0x%08lX\n", man->io_offset);
+	printk(KERN_ERR TTM_PFX "    io_size: %ld\n", man->io_size);
+	printk(KERN_ERR TTM_PFX "    size: %ld\n", (unsigned long)man->size);
+	printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
+		man->available_caching);
+	printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
+		man->default_caching);
+	spin_lock(&glob->lru_lock);
+	drm_mm_debug_table(&man->manager, TTM_PFX);
+	spin_unlock(&glob->lru_lock);
+}
+
+static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
+					struct ttm_placement *placement)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
+	struct ttm_mem_type_manager *man;
+	int i, ret, mem_type;
+
+	printk(KERN_ERR TTM_PFX "No space for %p (%ld pages, %ldK, %ldM)\n",
+		bo, bo->mem.num_pages, bo->mem.size >> 10,
+		bo->mem.size >> 20);
+	for (i = 0; i < placement->num_placement; i++) {
+		ret = ttm_mem_type_from_flags(placement->placement[i],
+						&mem_type);
+		if (ret)
+			return;
+		man = &bdev->man[mem_type];
+		printk(KERN_ERR TTM_PFX "  placement[%d]=0x%08X (%d)\n",
+			i, placement->placement[i], mem_type);
+		ttm_mem_type_manager_debug(glob, man);
+	}
+}
+
 static ssize_t ttm_bo_global_show(struct kobject *kobj,
 				  struct attribute *attr,
 				  char *buffer)
@@ -117,12 +179,13 @@
 		ret = wait_event_interruptible(bo->event_queue,
 					       atomic_read(&bo->reserved) == 0);
 		if (unlikely(ret != 0))
-			return -ERESTART;
+			return ret;
 	} else {
 		wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
 	}
 	return 0;
 }
+EXPORT_SYMBOL(ttm_bo_wait_unreserved);
 
 static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {
@@ -247,7 +310,6 @@
 /*
  * Call bo->mutex locked.
  */
-
 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -275,9 +337,10 @@
 		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
 					page_flags | TTM_PAGE_FLAG_USER,
 					glob->dummy_read_page);
-		if (unlikely(bo->ttm == NULL))
+		if (unlikely(bo->ttm == NULL)) {
 			ret = -ENOMEM;
-		break;
+			break;
+		}
 
 		ret = ttm_tt_set_user(bo->ttm, current,
 				      bo->buffer_start, bo->num_pages);
@@ -328,14 +391,8 @@
 		}
 
 		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
-
-			struct ttm_mem_reg *old_mem = &bo->mem;
-			uint32_t save_flags = old_mem->placement;
-
-			*old_mem = *mem;
+			bo->mem = *mem;
 			mem->mm_node = NULL;
-			ttm_flag_masked(&save_flags, mem->placement,
-					TTM_PL_MASK_MEMTYPE);
 			goto moved;
 		}
 
@@ -418,6 +475,7 @@
 			kref_put(&bo->list_kref, ttm_bo_ref_bug);
 		}
 		if (bo->mem.mm_node) {
+			bo->mem.mm_node->private = NULL;
 			drm_mm_put_block(bo->mem.mm_node);
 			bo->mem.mm_node = NULL;
 		}
@@ -554,24 +612,21 @@
 }
 EXPORT_SYMBOL(ttm_bo_unref);
 
-static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
-			bool interruptible, bool no_wait)
+static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
+			bool no_wait)
 {
-	int ret = 0;
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
 	struct ttm_mem_reg evict_mem;
-	uint32_t proposed_placement;
-
-	if (bo->mem.mem_type != mem_type)
-		goto out;
+	struct ttm_placement placement;
+	int ret = 0;
 
 	spin_lock(&bo->lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
 	spin_unlock(&bo->lock);
 
 	if (unlikely(ret != 0)) {
-		if (ret != -ERESTART) {
+		if (ret != -ERESTARTSYS) {
 			printk(KERN_ERR TTM_PFX
 			       "Failed to expire sync object before "
 			       "buffer eviction.\n");
@@ -584,116 +639,139 @@
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
 
-	proposed_placement = bdev->driver->evict_flags(bo);
-
-	ret = ttm_bo_mem_space(bo, proposed_placement,
-			       &evict_mem, interruptible, no_wait);
-	if (unlikely(ret != 0 && ret != -ERESTART))
-		ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
-				       &evict_mem, interruptible, no_wait);
-
+	placement.fpfn = 0;
+	placement.lpfn = 0;
+	placement.num_placement = 0;
+	placement.num_busy_placement = 0;
+	bdev->driver->evict_flags(bo, &placement);
+	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
+				no_wait);
 	if (ret) {
-		if (ret != -ERESTART)
+		if (ret != -ERESTARTSYS) {
 			printk(KERN_ERR TTM_PFX
 			       "Failed to find memory space for "
 			       "buffer 0x%p eviction.\n", bo);
+			ttm_bo_mem_space_debug(bo, &placement);
+		}
 		goto out;
 	}
 
 	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
 				     no_wait);
 	if (ret) {
-		if (ret != -ERESTART)
+		if (ret != -ERESTARTSYS)
 			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
+		spin_lock(&glob->lru_lock);
+		if (evict_mem.mm_node) {
+			evict_mem.mm_node->private = NULL;
+			drm_mm_put_block(evict_mem.mm_node);
+			evict_mem.mm_node = NULL;
+		}
+		spin_unlock(&glob->lru_lock);
 		goto out;
 	}
-
-	spin_lock(&glob->lru_lock);
-	if (evict_mem.mm_node) {
-		drm_mm_put_block(evict_mem.mm_node);
-		evict_mem.mm_node = NULL;
-	}
-	spin_unlock(&glob->lru_lock);
 	bo->evicted = true;
 out:
 	return ret;
 }
 
+static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+				uint32_t mem_type,
+				bool interruptible, bool no_wait)
+{
+	struct ttm_bo_global *glob = bdev->glob;
+	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+	struct ttm_buffer_object *bo;
+	int ret, put_count = 0;
+
+	spin_lock(&glob->lru_lock);
+	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
+	kref_get(&bo->list_kref);
+	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0);
+	if (likely(ret == 0))
+		put_count = ttm_bo_del_from_lru(bo);
+	spin_unlock(&glob->lru_lock);
+	if (unlikely(ret != 0))
+		return ret;
+	while (put_count--)
+		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+	ret = ttm_bo_evict(bo, interruptible, no_wait);
+	ttm_bo_unreserve(bo);
+	kref_put(&bo->list_kref, ttm_bo_release_list);
+	return ret;
+}
+
+static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
+				struct ttm_mem_type_manager *man,
+				struct ttm_placement *placement,
+				struct ttm_mem_reg *mem,
+				struct drm_mm_node **node)
+{
+	struct ttm_bo_global *glob = bo->glob;
+	unsigned long lpfn;
+	int ret;
+
+	lpfn = placement->lpfn;
+	if (!lpfn)
+		lpfn = man->size;
+	*node = NULL;
+	do {
+		ret = drm_mm_pre_get(&man->manager);
+		if (unlikely(ret))
+			return ret;
+
+		spin_lock(&glob->lru_lock);
+		*node = drm_mm_search_free_in_range(&man->manager,
+					mem->num_pages, mem->page_alignment,
+					placement->fpfn, lpfn, 1);
+		if (unlikely(*node == NULL)) {
+			spin_unlock(&glob->lru_lock);
+			return 0;
+		}
+		*node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
+							mem->page_alignment,
+							placement->fpfn,
+							lpfn);
+		spin_unlock(&glob->lru_lock);
+	} while (*node == NULL);
+	return 0;
+}
+
 /**
  * Repeatedly evict memory from the LRU for @mem_type until we create enough
  * space, or we've evicted everything and there isn't enough space.
  */
-static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
-				  struct ttm_mem_reg *mem,
-				  uint32_t mem_type,
-				  bool interruptible, bool no_wait)
+static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
+					uint32_t mem_type,
+					struct ttm_placement *placement,
+					struct ttm_mem_reg *mem,
+					bool interruptible, bool no_wait)
 {
+	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bdev->glob;
-	struct drm_mm_node *node;
-	struct ttm_buffer_object *entry;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-	struct list_head *lru;
-	unsigned long num_pages = mem->num_pages;
-	int put_count = 0;
+	struct drm_mm_node *node;
 	int ret;
 
-retry_pre_get:
-	ret = drm_mm_pre_get(&man->manager);
-	if (unlikely(ret != 0))
-		return ret;
-
-	spin_lock(&glob->lru_lock);
 	do {
-		node = drm_mm_search_free(&man->manager, num_pages,
-					  mem->page_alignment, 1);
-		if (node)
-			break;
-
-		lru = &man->lru;
-		if (list_empty(lru))
-			break;
-
-		entry = list_first_entry(lru, struct ttm_buffer_object, lru);
-		kref_get(&entry->list_kref);
-
-		ret =
-		    ttm_bo_reserve_locked(entry, interruptible, no_wait,
-					  false, 0);
-
-		if (likely(ret == 0))
-			put_count = ttm_bo_del_from_lru(entry);
-
-		spin_unlock(&glob->lru_lock);
-
+		ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
 		if (unlikely(ret != 0))
 			return ret;
-
-		while (put_count--)
-			kref_put(&entry->list_kref, ttm_bo_ref_bug);
-
-		ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
-
-		ttm_bo_unreserve(entry);
-
-		kref_put(&entry->list_kref, ttm_bo_release_list);
-		if (ret)
-			return ret;
-
+		if (node)
+			break;
 		spin_lock(&glob->lru_lock);
+		if (list_empty(&man->lru)) {
+			spin_unlock(&glob->lru_lock);
+			break;
+		}
+		spin_unlock(&glob->lru_lock);
+		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
+						no_wait);
+		if (unlikely(ret != 0))
+			return ret;
 	} while (1);
-
-	if (!node) {
-		spin_unlock(&glob->lru_lock);
+	if (node == NULL)
 		return -ENOMEM;
-	}
-
-	node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
-	if (unlikely(!node)) {
-		spin_unlock(&glob->lru_lock);
-		goto retry_pre_get;
-	}
-
-	spin_unlock(&glob->lru_lock);
 	mem->mm_node = node;
 	mem->mem_type = mem_type;
 	return 0;
@@ -724,7 +802,6 @@
 	return result;
 }
 
-
 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 				 bool disallow_fixed,
 				 uint32_t mem_type,
@@ -757,66 +834,55 @@
  * space.
  */
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
-		     uint32_t proposed_placement,
-		     struct ttm_mem_reg *mem,
-		     bool interruptible, bool no_wait)
+			struct ttm_placement *placement,
+			struct ttm_mem_reg *mem,
+			bool interruptible, bool no_wait)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_global *glob = bo->glob;
 	struct ttm_mem_type_manager *man;
-
-	uint32_t num_prios = bdev->driver->num_mem_type_prio;
-	const uint32_t *prios = bdev->driver->mem_type_prio;
-	uint32_t i;
 	uint32_t mem_type = TTM_PL_SYSTEM;
 	uint32_t cur_flags = 0;
 	bool type_found = false;
 	bool type_ok = false;
-	bool has_eagain = false;
+	bool has_erestartsys = false;
 	struct drm_mm_node *node = NULL;
-	int ret;
+	int i, ret;
 
 	mem->mm_node = NULL;
-	for (i = 0; i < num_prios; ++i) {
-		mem_type = prios[i];
+	for (i = 0; i < placement->num_placement; ++i) {
+		ret = ttm_mem_type_from_flags(placement->placement[i],
+						&mem_type);
+		if (ret)
+			return ret;
 		man = &bdev->man[mem_type];
 
 		type_ok = ttm_bo_mt_compatible(man,
-					       bo->type == ttm_bo_type_user,
-					       mem_type, proposed_placement,
-					       &cur_flags);
+						bo->type == ttm_bo_type_user,
+						mem_type,
+						placement->placement[i],
+						&cur_flags);
 
 		if (!type_ok)
 			continue;
 
 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
 						  cur_flags);
+		/*
+		 * Copy the access and other non-mapping-related flag bits from
+		 * the memory placement flags into the current flags.
+		 */
+		ttm_flag_masked(&cur_flags, placement->placement[i],
+				~TTM_PL_MASK_MEMTYPE);
 
 		if (mem_type == TTM_PL_SYSTEM)
 			break;
 
 		if (man->has_type && man->use_type) {
 			type_found = true;
-			do {
-				ret = drm_mm_pre_get(&man->manager);
-				if (unlikely(ret))
-					return ret;
-
-				spin_lock(&glob->lru_lock);
-				node = drm_mm_search_free(&man->manager,
-							  mem->num_pages,
-							  mem->page_alignment,
-							  1);
-				if (unlikely(!node)) {
-					spin_unlock(&glob->lru_lock);
-					break;
-				}
-				node = drm_mm_get_block_atomic(node,
-							       mem->num_pages,
-							       mem->
-							       page_alignment);
-				spin_unlock(&glob->lru_lock);
-			} while (!node);
+			ret = ttm_bo_man_get_node(bo, man, placement, mem,
+							&node);
+			if (unlikely(ret))
+				return ret;
 		}
 		if (node)
 			break;
@@ -826,67 +892,66 @@
 		mem->mm_node = node;
 		mem->mem_type = mem_type;
 		mem->placement = cur_flags;
+		if (node)
+			node->private = bo;
 		return 0;
 	}
 
 	if (!type_found)
 		return -EINVAL;
 
-	num_prios = bdev->driver->num_mem_busy_prio;
-	prios = bdev->driver->mem_busy_prio;
-
-	for (i = 0; i < num_prios; ++i) {
-		mem_type = prios[i];
+	for (i = 0; i < placement->num_busy_placement; ++i) {
+		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
+						&mem_type);
+		if (ret)
+			return ret;
 		man = &bdev->man[mem_type];
-
 		if (!man->has_type)
 			continue;
-
 		if (!ttm_bo_mt_compatible(man,
-					  bo->type == ttm_bo_type_user,
-					  mem_type,
-					  proposed_placement, &cur_flags))
+						bo->type == ttm_bo_type_user,
+						mem_type,
+						placement->busy_placement[i],
+						&cur_flags))
 			continue;
 
 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
 						  cur_flags);
+		/*
+		 * Copy the access and other non-mapping-related flag bits from
+		 * the memory placement flags into the current flags.
+		 */
+		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
+				~TTM_PL_MASK_MEMTYPE);
 
-		ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
-					     interruptible, no_wait);
-
+		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
+						interruptible, no_wait);
 		if (ret == 0 && mem->mm_node) {
 			mem->placement = cur_flags;
+			mem->mm_node->private = bo;
 			return 0;
 		}
-
-		if (ret == -ERESTART)
-			has_eagain = true;
+		if (ret == -ERESTARTSYS)
+			has_erestartsys = true;
 	}
-
-	ret = (has_eagain) ? -ERESTART : -ENOMEM;
+	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_mem_space);
 
 int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
 {
-	int ret = 0;
-
 	if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
 		return -EBUSY;
 
-	ret = wait_event_interruptible(bo->event_queue,
-				       atomic_read(&bo->cpu_writers) == 0);
-
-	if (ret == -ERESTARTSYS)
-		ret = -ERESTART;
-
-	return ret;
+	return wait_event_interruptible(bo->event_queue,
+					atomic_read(&bo->cpu_writers) == 0);
 }
+EXPORT_SYMBOL(ttm_bo_wait_cpu);
 
 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
-		       uint32_t proposed_placement,
-		       bool interruptible, bool no_wait)
+			struct ttm_placement *placement,
+			bool interruptible, bool no_wait)
 {
 	struct ttm_bo_global *glob = bo->glob;
 	int ret = 0;
@@ -899,147 +964,132 @@
 	 * Have the driver move function wait for idle when necessary,
 	 * instead of doing it here.
 	 */
-
 	spin_lock(&bo->lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
 	spin_unlock(&bo->lock);
-
 	if (ret)
 		return ret;
-
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
-
 	/*
 	 * Determine where to move the buffer.
 	 */
-
-	ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
-			       interruptible, no_wait);
+	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
 	if (ret)
 		goto out_unlock;
-
 	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
-
 out_unlock:
 	if (ret && mem.mm_node) {
 		spin_lock(&glob->lru_lock);
+		mem.mm_node->private = NULL;
 		drm_mm_put_block(mem.mm_node);
 		spin_unlock(&glob->lru_lock);
 	}
 	return ret;
 }
 
-static int ttm_bo_mem_compat(uint32_t proposed_placement,
+static int ttm_bo_mem_compat(struct ttm_placement *placement,
 			     struct ttm_mem_reg *mem)
 {
-	if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
-		return 0;
-	if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
-		return 0;
+	int i;
 
-	return 1;
+	for (i = 0; i < placement->num_placement; i++) {
+		if ((placement->placement[i] & mem->placement &
+			TTM_PL_MASK_CACHING) &&
+			(placement->placement[i] & mem->placement &
+			TTM_PL_MASK_MEM))
+			return i;
+	}
+	return -1;
 }
 
-int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
-			       uint32_t proposed_placement,
-			       bool interruptible, bool no_wait)
+int ttm_bo_validate(struct ttm_buffer_object *bo,
+			struct ttm_placement *placement,
+			bool interruptible, bool no_wait)
 {
 	int ret;
 
 	BUG_ON(!atomic_read(&bo->reserved));
-	bo->proposed_placement = proposed_placement;
-
-	TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
-		  (unsigned long)proposed_placement,
-		  (unsigned long)bo->mem.placement);
-
+	/* Check that range is valid */
+	if (placement->lpfn || placement->fpfn)
+		if (placement->fpfn > placement->lpfn ||
+			(placement->lpfn - placement->fpfn) < bo->num_pages)
+			return -EINVAL;
 	/*
 	 * Check whether we need to move buffer.
 	 */
-
-	if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
-		ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
-					 interruptible, no_wait);
-		if (ret) {
-			if (ret != -ERESTART)
-				printk(KERN_ERR TTM_PFX
-				       "Failed moving buffer. "
-				       "Proposed placement 0x%08x\n",
-				       bo->proposed_placement);
-			if (ret == -ENOMEM)
-				printk(KERN_ERR TTM_PFX
-				       "Out of aperture space or "
-				       "DRM memory quota.\n");
+	ret = ttm_bo_mem_compat(placement, &bo->mem);
+	if (ret < 0) {
+		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+		if (ret)
 			return ret;
-		}
+	} else {
+		/*
+		 * Copy the access and other non-mapping-related flag bits from
+		 * the compatible memory placement flags into the active flags.
+		 */
+		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
+				~TTM_PL_MASK_MEMTYPE);
 	}
-
 	/*
 	 * We might need to add a TTM.
 	 */
-
 	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
 		ret = ttm_bo_add_ttm(bo, true);
 		if (ret)
 			return ret;
 	}
-	/*
-	 * Validation has succeeded, move the access and other
-	 * non-mapping-related flag bits from the proposed flags to
-	 * the active flags
-	 */
-
-	ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
-			~TTM_PL_MASK_MEMTYPE);
-
 	return 0;
 }
-EXPORT_SYMBOL(ttm_buffer_object_validate);
+EXPORT_SYMBOL(ttm_bo_validate);
 
-int
-ttm_bo_check_placement(struct ttm_buffer_object *bo,
-		       uint32_t set_flags, uint32_t clr_flags)
+int ttm_bo_check_placement(struct ttm_buffer_object *bo,
+				struct ttm_placement *placement)
 {
-	uint32_t new_mask = set_flags | clr_flags;
+	int i;
 
-	if ((bo->type == ttm_bo_type_user) &&
-	    (clr_flags & TTM_PL_FLAG_CACHED)) {
-		printk(KERN_ERR TTM_PFX
-		       "User buffers require cache-coherent memory.\n");
-		return -EINVAL;
-	}
-
-	if (!capable(CAP_SYS_ADMIN)) {
-		if (new_mask & TTM_PL_FLAG_NO_EVICT) {
-			printk(KERN_ERR TTM_PFX "Need to be root to modify"
-			       " NO_EVICT status.\n");
+	if (placement->fpfn || placement->lpfn) {
+		if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
+			printk(KERN_ERR TTM_PFX "Page number range to small "
+				"Need %lu pages, range is [%u, %u]\n",
+				bo->mem.num_pages, placement->fpfn,
+				placement->lpfn);
 			return -EINVAL;
 		}
-
-		if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
-		    (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
-			printk(KERN_ERR TTM_PFX
-			       "Incompatible memory specification"
-			       " for NO_EVICT buffer.\n");
-			return -EINVAL;
+	}
+	for (i = 0; i < placement->num_placement; i++) {
+		if (!capable(CAP_SYS_ADMIN)) {
+			if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
+				printk(KERN_ERR TTM_PFX "Need to be root to "
+					"modify NO_EVICT status.\n");
+				return -EINVAL;
+			}
+		}
+	}
+	for (i = 0; i < placement->num_busy_placement; i++) {
+		if (!capable(CAP_SYS_ADMIN)) {
+			if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
+				printk(KERN_ERR TTM_PFX "Need to be root to "
+					"modify NO_EVICT status.\n");
+				return -EINVAL;
+			}
 		}
 	}
 	return 0;
 }
 
-int ttm_buffer_object_init(struct ttm_bo_device *bdev,
-			   struct ttm_buffer_object *bo,
-			   unsigned long size,
-			   enum ttm_bo_type type,
-			   uint32_t flags,
-			   uint32_t page_alignment,
-			   unsigned long buffer_start,
-			   bool interruptible,
-			   struct file *persistant_swap_storage,
-			   size_t acc_size,
-			   void (*destroy) (struct ttm_buffer_object *))
+int ttm_bo_init(struct ttm_bo_device *bdev,
+		struct ttm_buffer_object *bo,
+		unsigned long size,
+		enum ttm_bo_type type,
+		struct ttm_placement *placement,
+		uint32_t page_alignment,
+		unsigned long buffer_start,
+		bool interruptible,
+		struct file *persistant_swap_storage,
+		size_t acc_size,
+		void (*destroy) (struct ttm_buffer_object *))
 {
 	int ret = 0;
 	unsigned long num_pages;
@@ -1077,29 +1127,21 @@
 	bo->acc_size = acc_size;
 	atomic_inc(&bo->glob->bo_count);
 
-	ret = ttm_bo_check_placement(bo, flags, 0ULL);
+	ret = ttm_bo_check_placement(bo, placement);
 	if (unlikely(ret != 0))
 		goto out_err;
 
 	/*
-	 * If no caching attributes are set, accept any form of caching.
-	 */
-
-	if ((flags & TTM_PL_MASK_CACHING) == 0)
-		flags |= TTM_PL_MASK_CACHING;
-
-	/*
 	 * For ttm_bo_type_device buffers, allocate
 	 * address space from the device.
 	 */
-
 	if (bo->type == ttm_bo_type_device) {
 		ret = ttm_bo_setup_vm(bo);
 		if (ret)
 			goto out_err;
 	}
 
-	ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
+	ret = ttm_bo_validate(bo, placement, interruptible, false);
 	if (ret)
 		goto out_err;
 
@@ -1112,7 +1154,7 @@
 
 	return ret;
 }
-EXPORT_SYMBOL(ttm_buffer_object_init);
+EXPORT_SYMBOL(ttm_bo_init);
 
 static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
 				 unsigned long num_pages)
@@ -1123,19 +1165,19 @@
 	return glob->ttm_bo_size + 2 * page_array_size;
 }
 
-int ttm_buffer_object_create(struct ttm_bo_device *bdev,
-			     unsigned long size,
-			     enum ttm_bo_type type,
-			     uint32_t flags,
-			     uint32_t page_alignment,
-			     unsigned long buffer_start,
-			     bool interruptible,
-			     struct file *persistant_swap_storage,
-			     struct ttm_buffer_object **p_bo)
+int ttm_bo_create(struct ttm_bo_device *bdev,
+			unsigned long size,
+			enum ttm_bo_type type,
+			struct ttm_placement *placement,
+			uint32_t page_alignment,
+			unsigned long buffer_start,
+			bool interruptible,
+			struct file *persistant_swap_storage,
+			struct ttm_buffer_object **p_bo)
 {
 	struct ttm_buffer_object *bo;
-	int ret;
 	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+	int ret;
 
 	size_t acc_size =
 	    ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
@@ -1150,76 +1192,41 @@
 		return -ENOMEM;
 	}
 
-	ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
-				     page_alignment, buffer_start,
-				     interruptible,
-				     persistant_swap_storage, acc_size, NULL);
+	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
+				buffer_start, interruptible,
+				persistant_swap_storage, acc_size, NULL);
 	if (likely(ret == 0))
 		*p_bo = bo;
 
 	return ret;
 }
 
-static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
-			     uint32_t mem_type, bool allow_errors)
-{
-	int ret;
-
-	spin_lock(&bo->lock);
-	ret = ttm_bo_wait(bo, false, false, false);
-	spin_unlock(&bo->lock);
-
-	if (ret && allow_errors)
-		goto out;
-
-	if (bo->mem.mem_type == mem_type)
-		ret = ttm_bo_evict(bo, mem_type, false, false);
-
-	if (ret) {
-		if (allow_errors) {
-			goto out;
-		} else {
-			ret = 0;
-			printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
-		}
-	}
-
-out:
-	return ret;
-}
-
 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
-				   struct list_head *head,
-				   unsigned mem_type, bool allow_errors)
+					unsigned mem_type, bool allow_errors)
 {
+	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	struct ttm_bo_global *glob = bdev->glob;
-	struct ttm_buffer_object *entry;
 	int ret;
-	int put_count;
 
 	/*
 	 * Can't use standard list traversal since we're unlocking.
 	 */
 
 	spin_lock(&glob->lru_lock);
-
-	while (!list_empty(head)) {
-		entry = list_first_entry(head, struct ttm_buffer_object, lru);
-		kref_get(&entry->list_kref);
-		ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
-		put_count = ttm_bo_del_from_lru(entry);
+	while (!list_empty(&man->lru)) {
 		spin_unlock(&glob->lru_lock);
-		while (put_count--)
-			kref_put(&entry->list_kref, ttm_bo_ref_bug);
-		BUG_ON(ret);
-		ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
-		ttm_bo_unreserve(entry);
-		kref_put(&entry->list_kref, ttm_bo_release_list);
+		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+		if (ret) {
+			if (allow_errors) {
+				return ret;
+			} else {
+				printk(KERN_ERR TTM_PFX
+					"Cleanup eviction failed\n");
+			}
+		}
 		spin_lock(&glob->lru_lock);
 	}
-
 	spin_unlock(&glob->lru_lock);
-
 	return 0;
 }
 
@@ -1246,7 +1253,7 @@
 
 	ret = 0;
 	if (mem_type > 0) {
-		ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
+		ttm_bo_force_list_clean(bdev, mem_type, false);
 
 		spin_lock(&glob->lru_lock);
 		if (drm_mm_clean(&man->manager))
@@ -1279,12 +1286,12 @@
 		return 0;
 	}
 
-	return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
+	return ttm_bo_force_list_clean(bdev, mem_type, true);
 }
 EXPORT_SYMBOL(ttm_bo_evict_mm);
 
 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
-		   unsigned long p_offset, unsigned long p_size)
+			unsigned long p_size)
 {
 	int ret = -EINVAL;
 	struct ttm_mem_type_manager *man;
@@ -1314,7 +1321,7 @@
 			       type);
 			return ret;
 		}
-		ret = drm_mm_init(&man->manager, p_offset, p_size);
+		ret = drm_mm_init(&man->manager, 0, p_size);
 		if (ret)
 			return ret;
 	}
@@ -1463,7 +1470,7 @@
 	 * Initialize the system memory buffer type.
 	 * Other types need to be driver / IOCTL initialized.
 	 */
-	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
+	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
 	if (unlikely(ret != 0))
 		goto out_no_sys;
 
@@ -1693,7 +1700,7 @@
 			ret = wait_event_interruptible
 			    (bo->event_queue, atomic_read(&bo->reserved) == 0);
 			if (unlikely(ret != 0))
-				return -ERESTART;
+				return ret;
 		} else {
 			wait_event(bo->event_queue,
 				   atomic_read(&bo->reserved) == 0);
@@ -1722,12 +1729,14 @@
 	ttm_bo_unreserve(bo);
 	return ret;
 }
+EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
 
 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
 {
 	if (atomic_dec_and_test(&bo->cpu_writers))
 		wake_up_all(&bo->event_queue);
 }
+EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
 
 /**
  * A buffer object shrink method that tries to swap out the first
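
The ttm_bo.c rework replaces the old proposed_placement bitmask with
struct ttm_placement, which carries an optional page range (fpfn/lpfn) plus
arrays of preferred and busy-fallback placements. A minimal sketch of how a
driver might fill one and validate an already-reserved buffer into VRAM
(field names follow the code above; example_pin_vram and its single-entry
flag array are assumptions of this example):

	static int example_pin_vram(struct ttm_buffer_object *bo)
	{
		struct ttm_placement placement;
		uint32_t flags = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;

		placement.fpfn = 0;	/* 0/0 means no range restriction */
		placement.lpfn = 0;
		placement.num_placement = 1;
		placement.placement = &flags;
		placement.num_busy_placement = 1;
		placement.busy_placement = &flags;

		/* caller must hold the reservation (see the BUG_ON above) */
		return ttm_bo_validate(bo, &placement, true, false);
	}
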
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 61c5572..2ecf7d0 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -369,6 +369,7 @@
 #endif
 	return tmp;
 }
+EXPORT_SYMBOL(ttm_io_prot);
 
 static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
 			  unsigned long bus_base,
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 1c040d0..609a85a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -114,7 +114,7 @@
 		ret = ttm_bo_wait(bo, false, true, false);
 		spin_unlock(&bo->lock);
 		if (unlikely(ret != 0)) {
-			retval = (ret != -ERESTART) ?
+			retval = (ret != -ERESTARTSYS) ?
 			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
 			goto out_unlock;
 		}
@@ -349,9 +349,6 @@
 	switch (ret) {
 	case 0:
 		break;
-	case -ERESTART:
-		ret = -EINTR;
-		goto out_unref;
 	case -EBUSY:
 		ret = -EAGAIN;
 		goto out_unref;
@@ -421,8 +418,6 @@
 	switch (ret) {
 	case 0:
 		break;
-	case -ERESTART:
-		return -EINTR;
 	case -EBUSY:
 		return -EAGAIN;
 	default:
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
new file mode 100644
index 0000000..c285c29
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -0,0 +1,117 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "ttm/ttm_execbuf_util.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+void ttm_eu_backoff_reservation(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+		if (!entry->reserved)
+			continue;
+
+		entry->reserved = false;
+		ttm_bo_unreserve(bo);
+	}
+}
+EXPORT_SYMBOL(ttm_eu_backoff_reservation);
+
+/*
+ * Reserve buffers for validation.
+ *
+ * If a buffer in the list is marked for CPU access, we back off and
+ * wait for that buffer to become free for GPU access.
+ *
+ * If a buffer is reserved for another validation, the validator with
+ * the highest validation sequence backs off and waits for that buffer
+ * to become unreserved. This prevents deadlocks when validating multiple
+ * buffers in different orders.
+ */
+
+int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
+{
+	struct ttm_validate_buffer *entry;
+	int ret;
+
+retry:
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+
+		entry->reserved = false;
+		ret = ttm_bo_reserve(bo, true, false, true, val_seq);
+		if (ret != 0) {
+			ttm_eu_backoff_reservation(list);
+			if (ret == -EAGAIN) {
+				ret = ttm_bo_wait_unreserved(bo, true);
+				if (unlikely(ret != 0))
+					return ret;
+				goto retry;
+			} else
+				return ret;
+		}
+
+		entry->reserved = true;
+		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+			ttm_eu_backoff_reservation(list);
+			ret = ttm_bo_wait_cpu(bo, false);
+			if (ret)
+				return ret;
+			goto retry;
+		}
+	}
+	return 0;
+}
+EXPORT_SYMBOL(ttm_eu_reserve_buffers);
+
+void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
+{
+	struct ttm_validate_buffer *entry;
+
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+		struct ttm_bo_driver *driver = bo->bdev->driver;
+		void *old_sync_obj;
+
+		spin_lock(&bo->lock);
+		old_sync_obj = bo->sync_obj;
+		bo->sync_obj = driver->sync_obj_ref(sync_obj);
+		bo->sync_obj_arg = entry->new_sync_obj_arg;
+		spin_unlock(&bo->lock);
+		ttm_bo_unreserve(bo);
+		entry->reserved = false;
+		if (old_sync_obj)
+			driver->sync_obj_unref(&old_sync_obj);
+	}
+}
+EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
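
Taken together, the three exported helpers give drivers a validate-then-fence
flow: reserve every buffer on the list, do the driver-side validation and
command submission, then fence and unreserve in one pass, backing off if
anything fails in between. A hedged sketch (driver_submit() and the sync_obj
argument are placeholders for whatever the driver's fencing uses):

	static int example_execbuf(struct list_head *val_list, uint32_t val_seq,
				   void *sync_obj)
	{
		int ret;

		ret = ttm_eu_reserve_buffers(val_list, val_seq);
		if (unlikely(ret != 0))
			return ret;
		ret = driver_submit(val_list);	/* hypothetical */
		if (ret) {
			ttm_eu_backoff_reservation(val_list);
			return ret;
		}
		ttm_eu_fence_buffer_objects(val_list, sync_obj);
		return 0;
	}
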
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
new file mode 100644
index 0000000..f619ebc
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -0,0 +1,311 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "ttm/ttm_lock.h"
+#include "ttm/ttm_module.h"
+#include <asm/atomic.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+#define TTM_WRITE_LOCK_PENDING    (1 << 0)
+#define TTM_VT_LOCK_PENDING       (1 << 1)
+#define TTM_SUSPEND_LOCK_PENDING  (1 << 2)
+#define TTM_VT_LOCK               (1 << 3)
+#define TTM_SUSPEND_LOCK          (1 << 4)
+
+void ttm_lock_init(struct ttm_lock *lock)
+{
+	spin_lock_init(&lock->lock);
+	init_waitqueue_head(&lock->queue);
+	lock->rw = 0;
+	lock->flags = 0;
+	lock->kill_takers = false;
+	lock->signal = SIGKILL;
+}
+EXPORT_SYMBOL(ttm_lock_init);
+
+void ttm_read_unlock(struct ttm_lock *lock)
+{
+	spin_lock(&lock->lock);
+	if (--lock->rw == 0)
+		wake_up_all(&lock->queue);
+	spin_unlock(&lock->lock);
+}
+EXPORT_SYMBOL(ttm_read_unlock);
+
+static bool __ttm_read_lock(struct ttm_lock *lock)
+{
+	bool locked = false;
+
+	spin_lock(&lock->lock);
+	if (unlikely(lock->kill_takers)) {
+		send_sig(lock->signal, current, 0);
+		spin_unlock(&lock->lock);
+		return false;
+	}
+	if (lock->rw >= 0 && lock->flags == 0) {
+		++lock->rw;
+		locked = true;
+	}
+	spin_unlock(&lock->lock);
+	return locked;
+}
+
+int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
+{
+	int ret = 0;
+
+	if (interruptible)
+		ret = wait_event_interruptible(lock->queue,
+					       __ttm_read_lock(lock));
+	else
+		wait_event(lock->queue, __ttm_read_lock(lock));
+	return ret;
+}
+EXPORT_SYMBOL(ttm_read_lock);
+
+static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
+{
+	bool block = true;
+
+	*locked = false;
+
+	spin_lock(&lock->lock);
+	if (unlikely(lock->kill_takers)) {
+		send_sig(lock->signal, current, 0);
+		spin_unlock(&lock->lock);
+		return false;
+	}
+	if (lock->rw >= 0 && lock->flags == 0) {
+		++lock->rw;
+		block = false;
+		*locked = true;
+	} else if (lock->flags == 0) {
+		block = false;
+	}
+	spin_unlock(&lock->lock);
+
+	return !block;
+}
+
+int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
+{
+	int ret = 0;
+	bool locked;
+
+	if (interruptible)
+		ret = wait_event_interruptible
+			(lock->queue, __ttm_read_trylock(lock, &locked));
+	else
+		wait_event(lock->queue, __ttm_read_trylock(lock, &locked));
+
+	if (unlikely(ret != 0)) {
+		BUG_ON(locked);
+		return ret;
+	}
+
+	return (locked) ? 0 : -EBUSY;
+}
+
+void ttm_write_unlock(struct ttm_lock *lock)
+{
+	spin_lock(&lock->lock);
+	lock->rw = 0;
+	wake_up_all(&lock->queue);
+	spin_unlock(&lock->lock);
+}
+EXPORT_SYMBOL(ttm_write_unlock);
+
+static bool __ttm_write_lock(struct ttm_lock *lock)
+{
+	bool locked = false;
+
+	spin_lock(&lock->lock);
+	if (unlikely(lock->kill_takers)) {
+		send_sig(lock->signal, current, 0);
+		spin_unlock(&lock->lock);
+		return false;
+	}
+	if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
+		lock->rw = -1;
+		lock->flags &= ~TTM_WRITE_LOCK_PENDING;
+		locked = true;
+	} else {
+		lock->flags |= TTM_WRITE_LOCK_PENDING;
+	}
+	spin_unlock(&lock->lock);
+	return locked;
+}
+
+int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
+{
+	int ret = 0;
+
+	if (interruptible) {
+		ret = wait_event_interruptible(lock->queue,
+					       __ttm_write_lock(lock));
+		if (unlikely(ret != 0)) {
+			spin_lock(&lock->lock);
+			lock->flags &= ~TTM_WRITE_LOCK_PENDING;
+			wake_up_all(&lock->queue);
+			spin_unlock(&lock->lock);
+		}
+	} else
+		wait_event(lock->queue, __ttm_write_lock(lock));
+
+	return ret;
+}
+EXPORT_SYMBOL(ttm_write_lock);
+
+void ttm_write_lock_downgrade(struct ttm_lock *lock)
+{
+	spin_lock(&lock->lock);
+	lock->rw = 1;
+	wake_up_all(&lock->queue);
+	spin_unlock(&lock->lock);
+}
+
+static int __ttm_vt_unlock(struct ttm_lock *lock)
+{
+	int ret = 0;
+
+	spin_lock(&lock->lock);
+	if (unlikely(!(lock->flags & TTM_VT_LOCK)))
+		ret = -EINVAL;
+	lock->flags &= ~TTM_VT_LOCK;
+	wake_up_all(&lock->queue);
+	spin_unlock(&lock->lock);
+	printk(KERN_INFO TTM_PFX "vt unlock.\n");
+
+	return ret;
+}
+
+static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
+{
+	struct ttm_base_object *base = *p_base;
+	struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
+	int ret;
+
+	*p_base = NULL;
+	ret = __ttm_vt_unlock(lock);
+	BUG_ON(ret != 0);
+}
+
+static bool __ttm_vt_lock(struct ttm_lock *lock)
+{
+	bool locked = false;
+
+	spin_lock(&lock->lock);
+	if (lock->rw == 0) {
+		lock->flags &= ~TTM_VT_LOCK_PENDING;
+		lock->flags |= TTM_VT_LOCK;
+		locked = true;
+	} else {
+		lock->flags |= TTM_VT_LOCK_PENDING;
+	}
+	spin_unlock(&lock->lock);
+	return locked;
+}
+
+int ttm_vt_lock(struct ttm_lock *lock,
+		bool interruptible,
+		struct ttm_object_file *tfile)
+{
+	int ret = 0;
+
+	if (interruptible) {
+		ret = wait_event_interruptible(lock->queue,
+					       __ttm_vt_lock(lock));
+		if (unlikely(ret != 0)) {
+			spin_lock(&lock->lock);
+			lock->flags &= ~TTM_VT_LOCK_PENDING;
+			wake_up_all(&lock->queue);
+			spin_unlock(&lock->lock);
+			return ret;
+		}
+	} else
+		wait_event(lock->queue, __ttm_vt_lock(lock));
+
+	/*
+	 * Add a base-object, the destructor of which will
+	 * make sure the lock is released if the client dies
+	 * while holding it.
+	 */
+
+	ret = ttm_base_object_init(tfile, &lock->base, false,
+				   ttm_lock_type, &ttm_vt_lock_remove, NULL);
+	if (ret)
+		(void)__ttm_vt_unlock(lock);
+	else {
+		lock->vt_holder = tfile;
+		printk(KERN_INFO TTM_PFX "vt lock.\n");
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(ttm_vt_lock);
+
+int ttm_vt_unlock(struct ttm_lock *lock)
+{
+	return ttm_ref_object_base_unref(lock->vt_holder,
+					 lock->base.hash.key, TTM_REF_USAGE);
+}
+EXPORT_SYMBOL(ttm_vt_unlock);
+
+void ttm_suspend_unlock(struct ttm_lock *lock)
+{
+	spin_lock(&lock->lock);
+	lock->flags &= ~TTM_SUSPEND_LOCK;
+	wake_up_all(&lock->queue);
+	spin_unlock(&lock->lock);
+}
+
+static bool __ttm_suspend_lock(struct ttm_lock *lock)
+{
+	bool locked = false;
+
+	spin_lock(&lock->lock);
+	if (lock->rw == 0) {
+		lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
+		lock->flags |= TTM_SUSPEND_LOCK;
+		locked = true;
+	} else {
+		lock->flags |= TTM_SUSPEND_LOCK_PENDING;
+	}
+	spin_unlock(&lock->lock);
+	return locked;
+}
+
+void ttm_suspend_lock(struct ttm_lock *lock)
+{
+	wait_event(lock->queue, __ttm_suspend_lock(lock));
+}
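
A minimal usage sketch for the read and write paths above (not part
of this patch; priv->ttm_lock and the two helpers are hypothetical):

    int ret;

    /* Shared path: any number of readers may hold the lock. */
    ret = ttm_read_lock(&priv->ttm_lock, true);
    if (unlikely(ret != 0))
            return ret;             /* -ERESTARTSYS on a signal */
    submit_commands(priv);          /* hypothetical helper */
    ttm_read_unlock(&priv->ttm_lock);

    /* Exclusive path: blocks until all readers have drained. */
    ret = ttm_write_lock(&priv->ttm_lock, true);
    if (unlikely(ret != 0))
            return ret;
    reset_hardware(priv);           /* hypothetical helper */
    ttm_write_unlock(&priv->ttm_lock);

The vt and suspend modes layered on top via lock->flags behave like
additional exclusive writers, with the vt lock also released
automatically through the base-object destructor if its holder dies.
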
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 072c281..f5245c0 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -274,16 +274,17 @@
 static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
 				     const struct sysinfo *si)
 {
-	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+	struct ttm_mem_zone *zone;
 	uint64_t mem;
 	int ret;
 
-	if (unlikely(!zone))
-		return -ENOMEM;
-
 	if (si->totalhigh == 0)
 		return 0;
 
+	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+	if (unlikely(!zone))
+		return -ENOMEM;
+
 	mem = si->totalram;
 	mem *= si->mem_unit;
 
@@ -322,8 +323,10 @@
 	 * No special dma32 zone needed.
 	 */
 
-	if (mem <= ((uint64_t) 1ULL << 32))
+	if (mem <= ((uint64_t) 1ULL << 32)) {
+		kfree(zone);
 		return 0;
+	}
 
 	/*
 	 * Limit max dma32 memory to 4GB for now
@@ -460,6 +463,7 @@
 {
 	return ttm_mem_global_free_zone(glob, NULL, amount);
 }
+EXPORT_SYMBOL(ttm_mem_global_free);
 
 static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
 				  struct ttm_mem_zone *single_zone,
@@ -533,6 +537,7 @@
 	return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
 					 interruptible);
 }
+EXPORT_SYMBOL(ttm_mem_global_alloc);
 
 int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
 			      struct page *page,
@@ -588,3 +593,4 @@
 	}
 	return 0;
 }
+EXPORT_SYMBOL(ttm_round_pot);
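
These three exports let code outside ttm_memory.c charge its kernel
allocations against the global TTM memory accounting. The pattern,
used by ttm_ref_object_add() in the new ttm_object.c below, is to
account first, then allocate, and to undo the accounting on failure
(a sketch; struct foo stands in for the caller's type):

    struct foo *obj;
    int ret;

    ret = ttm_mem_global_alloc(mem_glob, sizeof(*obj), false, false);
    if (unlikely(ret != 0))
            return ret;             /* accounting refused the request */
    obj = kmalloc(sizeof(*obj), GFP_KERNEL);
    if (unlikely(obj == NULL)) {
            ttm_mem_global_free(mem_glob, sizeof(*obj));
            return -ENOMEM;
    }

ttm_round_pot() is exported for the same reason: callers that round
their allocation sizes up for accounting need to charge the rounded
size, not the raw one.
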
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
new file mode 100644
index 0000000..1099aba
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -0,0 +1,452 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/** @file ttm_object.c
+ *
+ * Base- and reference object implementation for the various
+ * ttm objects. Implements reference counting, minimal security checks
+ * and release on file close.
+ */
+
+/**
+ * struct ttm_object_file
+ *
+ * @tdev: Pointer to the ttm_object_device.
+ *
+ * @lock: Lock that protects the ref_list list and the
+ * ref_hash hash tables.
+ *
+ * @ref_list: List of ttm_ref_objects to be destroyed at
+ * file release.
+ *
+ * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
+ * for fast lookup of ref objects given a base object.
+ */
+
+#include "ttm/ttm_object.h"
+#include "ttm/ttm_module.h"
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <asm/atomic.h>
+
+struct ttm_object_file {
+	struct ttm_object_device *tdev;
+	rwlock_t lock;
+	struct list_head ref_list;
+	struct drm_open_hash ref_hash[TTM_REF_NUM];
+	struct kref refcount;
+};
+
+/**
+ * struct ttm_object_device
+ *
+ * @object_lock: lock that protects the object_hash hash table.
+ *
+ * @object_hash: hash table for fast lookup of object global names.
+ *
+ * @object_count: Per device object count.
+ *
+ * This is the per-device data structure needed for ttm object management.
+ */
+
+struct ttm_object_device {
+	rwlock_t object_lock;
+	struct drm_open_hash object_hash;
+	atomic_t object_count;
+	struct ttm_mem_global *mem_glob;
+};
+
+/**
+ * struct ttm_ref_object
+ *
+ * @hash: Hash entry for the per-file object reference hash.
+ *
+ * @head: List entry for the per-file list of ref-objects.
+ *
+ * @kref: Ref count.
+ *
+ * @obj: Base object this ref object is referencing.
+ *
+ * @ref_type: Type of ref object.
+ *
+ * This is similar to an idr object, but it also has a hash table entry
+ * that allows lookup with a pointer to the referenced object as a key. In
+ * that way, one can easily detect whether a base object is referenced by
+ * a particular ttm_object_file. It also carries a ref count to avoid creating
+ * multiple ref objects if a ttm_object_file references the same base
+ * object more than once.
+ */
+
+struct ttm_ref_object {
+	struct drm_hash_item hash;
+	struct list_head head;
+	struct kref kref;
+	struct ttm_base_object *obj;
+	enum ttm_ref_type ref_type;
+	struct ttm_object_file *tfile;
+};
+
+static inline struct ttm_object_file *
+ttm_object_file_ref(struct ttm_object_file *tfile)
+{
+	kref_get(&tfile->refcount);
+	return tfile;
+}
+
+static void ttm_object_file_destroy(struct kref *kref)
+{
+	struct ttm_object_file *tfile =
+		container_of(kref, struct ttm_object_file, refcount);
+
+	kfree(tfile);
+}
+
+
+static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
+{
+	struct ttm_object_file *tfile = *p_tfile;
+
+	*p_tfile = NULL;
+	kref_put(&tfile->refcount, ttm_object_file_destroy);
+}
+
+
+int ttm_base_object_init(struct ttm_object_file *tfile,
+			 struct ttm_base_object *base,
+			 bool shareable,
+			 enum ttm_object_type object_type,
+			 void (*refcount_release) (struct ttm_base_object **),
+			 void (*ref_obj_release) (struct ttm_base_object *,
+						  enum ttm_ref_type ref_type))
+{
+	struct ttm_object_device *tdev = tfile->tdev;
+	int ret;
+
+	base->shareable = shareable;
+	base->tfile = ttm_object_file_ref(tfile);
+	base->refcount_release = refcount_release;
+	base->ref_obj_release = ref_obj_release;
+	base->object_type = object_type;
+	write_lock(&tdev->object_lock);
+	kref_init(&base->refcount);
+	ret = drm_ht_just_insert_please(&tdev->object_hash,
+					&base->hash,
+					(unsigned long)base, 31, 0, 0);
+	write_unlock(&tdev->object_lock);
+	if (unlikely(ret != 0))
+		goto out_err0;
+
+	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+	if (unlikely(ret != 0))
+		goto out_err1;
+
+	ttm_base_object_unref(&base);
+
+	return 0;
+out_err1:
+	(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+out_err0:
+	return ret;
+}
+EXPORT_SYMBOL(ttm_base_object_init);
+
+static void ttm_release_base(struct kref *kref)
+{
+	struct ttm_base_object *base =
+	    container_of(kref, struct ttm_base_object, refcount);
+	struct ttm_object_device *tdev = base->tfile->tdev;
+
+	(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+	write_unlock(&tdev->object_lock);
+	if (base->refcount_release) {
+		ttm_object_file_unref(&base->tfile);
+		base->refcount_release(&base);
+	}
+	write_lock(&tdev->object_lock);
+}
+
+void ttm_base_object_unref(struct ttm_base_object **p_base)
+{
+	struct ttm_base_object *base = *p_base;
+	struct ttm_object_device *tdev = base->tfile->tdev;
+
+	*p_base = NULL;
+
+	/*
+	 * Need to take the lock here to avoid racing with
+	 * users trying to look up the object.
+	 */
+
+	write_lock(&tdev->object_lock);
+	(void)kref_put(&base->refcount, &ttm_release_base);
+	write_unlock(&tdev->object_lock);
+}
+EXPORT_SYMBOL(ttm_base_object_unref);
+
+struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
+					       uint32_t key)
+{
+	struct ttm_object_device *tdev = tfile->tdev;
+	struct ttm_base_object *base;
+	struct drm_hash_item *hash;
+	int ret;
+
+	read_lock(&tdev->object_lock);
+	ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
+
+	if (likely(ret == 0)) {
+		base = drm_hash_entry(hash, struct ttm_base_object, hash);
+		kref_get(&base->refcount);
+	}
+	read_unlock(&tdev->object_lock);
+
+	if (unlikely(ret != 0))
+		return NULL;
+
+	if (tfile != base->tfile && !base->shareable) {
+		printk(KERN_ERR TTM_PFX
+		       "Attempted access of non-shareable object.\n");
+		ttm_base_object_unref(&base);
+		return NULL;
+	}
+
+	return base;
+}
+EXPORT_SYMBOL(ttm_base_object_lookup);
+
+int ttm_ref_object_add(struct ttm_object_file *tfile,
+		       struct ttm_base_object *base,
+		       enum ttm_ref_type ref_type, bool *existed)
+{
+	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+	struct ttm_ref_object *ref;
+	struct drm_hash_item *hash;
+	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+	int ret = -EINVAL;
+
+	if (existed != NULL)
+		*existed = true;
+
+	while (ret == -EINVAL) {
+		read_lock(&tfile->lock);
+		ret = drm_ht_find_item(ht, base->hash.key, &hash);
+
+		if (ret == 0) {
+			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+			kref_get(&ref->kref);
+			read_unlock(&tfile->lock);
+			break;
+		}
+
+		read_unlock(&tfile->lock);
+		ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
+					   false, false);
+		if (unlikely(ret != 0))
+			return ret;
+		ref = kmalloc(sizeof(*ref), GFP_KERNEL);
+		if (unlikely(ref == NULL)) {
+			ttm_mem_global_free(mem_glob, sizeof(*ref));
+			return -ENOMEM;
+		}
+
+		ref->hash.key = base->hash.key;
+		ref->obj = base;
+		ref->tfile = tfile;
+		ref->ref_type = ref_type;
+		kref_init(&ref->kref);
+
+		write_lock(&tfile->lock);
+		ret = drm_ht_insert_item(ht, &ref->hash);
+
+		if (likely(ret == 0)) {
+			list_add_tail(&ref->head, &tfile->ref_list);
+			kref_get(&base->refcount);
+			write_unlock(&tfile->lock);
+			if (existed != NULL)
+				*existed = false;
+			break;
+		}
+
+		write_unlock(&tfile->lock);
+		BUG_ON(ret != -EINVAL);
+
+		ttm_mem_global_free(mem_glob, sizeof(*ref));
+		kfree(ref);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(ttm_ref_object_add);
+
+static void ttm_ref_object_release(struct kref *kref)
+{
+	struct ttm_ref_object *ref =
+	    container_of(kref, struct ttm_ref_object, kref);
+	struct ttm_base_object *base = ref->obj;
+	struct ttm_object_file *tfile = ref->tfile;
+	struct drm_open_hash *ht;
+	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+
+	ht = &tfile->ref_hash[ref->ref_type];
+	(void)drm_ht_remove_item(ht, &ref->hash);
+	list_del(&ref->head);
+	write_unlock(&tfile->lock);
+
+	if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
+		base->ref_obj_release(base, ref->ref_type);
+
+	ttm_base_object_unref(&ref->obj);
+	ttm_mem_global_free(mem_glob, sizeof(*ref));
+	kfree(ref);
+	write_lock(&tfile->lock);
+}
+
+int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
+			      unsigned long key, enum ttm_ref_type ref_type)
+{
+	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+	struct ttm_ref_object *ref;
+	struct drm_hash_item *hash;
+	int ret;
+
+	write_lock(&tfile->lock);
+	ret = drm_ht_find_item(ht, key, &hash);
+	if (unlikely(ret != 0)) {
+		write_unlock(&tfile->lock);
+		return -EINVAL;
+	}
+	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+	kref_put(&ref->kref, ttm_ref_object_release);
+	write_unlock(&tfile->lock);
+	return 0;
+}
+EXPORT_SYMBOL(ttm_ref_object_base_unref);
+
+void ttm_object_file_release(struct ttm_object_file **p_tfile)
+{
+	struct ttm_ref_object *ref;
+	struct list_head *list;
+	unsigned int i;
+	struct ttm_object_file *tfile = *p_tfile;
+
+	*p_tfile = NULL;
+	write_lock(&tfile->lock);
+
+	/*
+	 * Since we release the lock within the loop, we have to
+	 * restart it from the beginning each time.
+	 */
+
+	while (!list_empty(&tfile->ref_list)) {
+		list = tfile->ref_list.next;
+		ref = list_entry(list, struct ttm_ref_object, head);
+		ttm_ref_object_release(&ref->kref);
+	}
+
+	for (i = 0; i < TTM_REF_NUM; ++i)
+		drm_ht_remove(&tfile->ref_hash[i]);
+
+	write_unlock(&tfile->lock);
+	ttm_object_file_unref(&tfile);
+}
+EXPORT_SYMBOL(ttm_object_file_release);
+
+struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
+					     unsigned int hash_order)
+{
+	struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
+	unsigned int i;
+	unsigned int j = 0;
+	int ret;
+
+	if (unlikely(tfile == NULL))
+		return NULL;
+
+	rwlock_init(&tfile->lock);
+	tfile->tdev = tdev;
+	kref_init(&tfile->refcount);
+	INIT_LIST_HEAD(&tfile->ref_list);
+
+	for (i = 0; i < TTM_REF_NUM; ++i) {
+		ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
+		if (ret) {
+			j = i;
+			goto out_err;
+		}
+	}
+
+	return tfile;
+out_err:
+	for (i = 0; i < j; ++i)
+		drm_ht_remove(&tfile->ref_hash[i]);
+
+	kfree(tfile);
+
+	return NULL;
+}
+EXPORT_SYMBOL(ttm_object_file_init);
+
+struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
+						 *mem_glob,
+						 unsigned int hash_order)
+{
+	struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
+	int ret;
+
+	if (unlikely(tdev == NULL))
+		return NULL;
+
+	tdev->mem_glob = mem_glob;
+	rwlock_init(&tdev->object_lock);
+	atomic_set(&tdev->object_count, 0);
+	ret = drm_ht_create(&tdev->object_hash, hash_order);
+
+	if (likely(ret == 0))
+		return tdev;
+
+	kfree(tdev);
+	return NULL;
+}
+EXPORT_SYMBOL(ttm_object_device_init);
+
+void ttm_object_device_release(struct ttm_object_device **p_tdev)
+{
+	struct ttm_object_device *tdev = *p_tdev;
+
+	*p_tdev = NULL;
+
+	write_lock(&tdev->object_lock);
+	drm_ht_remove(&tdev->object_hash);
+	write_unlock(&tdev->object_lock);
+
+	kfree(tdev);
+}
+EXPORT_SYMBOL(ttm_object_device_release);
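
End to end, a driver ioctl path uses this machinery roughly as
follows: look up a base object by the global key passed in from user
space, take a per-file reference so the object stays around until
the file is closed, then drop the lookup reference (a sketch; key
and tfile come from the caller):

    struct ttm_base_object *base;
    bool existed;
    int ret;

    base = ttm_base_object_lookup(tfile, key);
    if (unlikely(base == NULL))
            return -EINVAL;

    ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, &existed);
    ttm_base_object_unref(&base);   /* the ref object holds its own ref */
    if (unlikely(ret != 0))
            return ret;

    /* Later, from an unref ioctl or implicitly at file release: */
    ttm_ref_object_base_unref(tfile, key, TTM_REF_USAGE);
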
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 7bcb89f..9c2b1cc 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -192,6 +192,7 @@
 	ttm->state = tt_unbound;
 	return 0;
 }
+EXPORT_SYMBOL(ttm_tt_populate);
 
 #ifdef CONFIG_X86
 static inline int ttm_tt_set_page_caching(struct page *p,
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 0258289..e2997a8 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -1253,10 +1253,9 @@
 {
 	struct hid_device *hid = usb_get_intfdata(intf);
 	struct usbhid_device *usbhid = hid->driver_data;
-	struct usb_device *udev = interface_to_usbdev(intf);
 	int status;
 
-	if (udev->auto_pm) {
+	if (message.event & PM_EVENT_AUTO) {
 		spin_lock_irq(&usbhid->lock);	/* Sync with error handler */
 		if (!test_bit(HID_RESET_PENDING, &usbhid->iofl)
 		    && !test_bit(HID_CLEAR_HALT, &usbhid->iofl)
@@ -1281,7 +1280,7 @@
 			return -EIO;
 	}
 
-	if (!ignoreled && udev->auto_pm) {
+	if (!ignoreled && (message.event & PM_EVENT_AUTO)) {
 		spin_lock_irq(&usbhid->lock);
 		if (test_bit(HID_LED_ON, &usbhid->iofl)) {
 			spin_unlock_irq(&usbhid->lock);
@@ -1294,7 +1293,8 @@
 	hid_cancel_delayed_stuff(usbhid);
 	hid_cease_io(usbhid);
 
-	if (udev->auto_pm && test_bit(HID_KEYS_PRESSED, &usbhid->iofl)) {
+	if ((message.event & PM_EVENT_AUTO) &&
+			test_bit(HID_KEYS_PRESSED, &usbhid->iofl)) {
 		/* lost race against keypresses */
 		status = hid_start_in(hid);
 		if (status < 0)
diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c
index b115726..c721c0a 100644
--- a/drivers/input/xen-kbdfront.c
+++ b/drivers/input/xen-kbdfront.c
@@ -21,7 +21,10 @@
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/input.h>
+
 #include <asm/xen/hypervisor.h>
+
+#include <xen/xen.h>
 #include <xen/events.h>
 #include <xen/page.h>
 #include <xen/interface/io/fbif.h>
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index e4ff50b..fcb6ec1 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -712,6 +712,12 @@
 
 	/* End of packet == #XX so look for the '#' */
 	if (put_buf_cnt > 3 && put_buf[put_buf_cnt - 3] == '#') {
+		if (put_buf_cnt >= BUFMAX) {
+			eprintk("kgdbts: ERROR: put buffer overflow on"
+				" '%s' line %i\n", ts.name, ts.idx);
+			put_buf_cnt = 0;
+			return 0;
+		}
 		put_buf[put_buf_cnt] = '\0';
 		v2printk("put%i: %s\n", ts.idx, put_buf);
 		/* Trigger check here */
@@ -885,16 +891,16 @@
 	int nmi_sleep = 0;
 	int i;
 
-	ptr = strstr(config, "F");
+	ptr = strchr(config, 'F');
 	if (ptr)
 		fork_test = simple_strtol(ptr + 1, NULL, 10);
-	ptr = strstr(config, "S");
+	ptr = strchr(config, 'S');
 	if (ptr)
 		do_sys_open_test = simple_strtol(ptr + 1, NULL, 10);
-	ptr = strstr(config, "N");
+	ptr = strchr(config, 'N');
 	if (ptr)
 		nmi_sleep = simple_strtol(ptr+1, NULL, 10);
-	ptr = strstr(config, "I");
+	ptr = strchr(config, 'I');
 	if (ptr)
 		sstep_test = simple_strtol(ptr+1, NULL, 10);
 
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 47e84ef..3b48681 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -579,7 +579,7 @@
  *
  *    As well, the device might refuse going to sleep for whichever
  *    reason. In this case we just fail. For system suspend/hibernate,
- *    we *can't* fail. We look at usb_dev->auto_pm to see if the
+ *    we *can't* fail. We check PM_EVENT_AUTO to see if the
  *    suspend call comes from the USB stack or from the system and act
  *    in consequence.
  *
@@ -591,14 +591,11 @@
 	int result = 0;
 	struct device *dev = &iface->dev;
 	struct i2400mu *i2400mu = usb_get_intfdata(iface);
-#ifdef CONFIG_PM
-	struct usb_device *usb_dev = i2400mu->usb_dev;
-#endif
 	unsigned is_autosuspend = 0;
 	struct i2400m *i2400m = &i2400mu->i2400m;
 
 #ifdef CONFIG_PM
-	if (usb_dev->auto_pm > 0)
+	if (pm_msg.event & PM_EVENT_AUTO)
 		is_autosuspend = 1;
 #endif
 
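
Both this driver and usbhid above now test PM_EVENT_AUTO in the
pm_message_t instead of peeking at udev->auto_pm, so a suspend
handler can refuse runtime autosuspend while still honoring system
sleep (a sketch; foo_suspend and device_busy are hypothetical):

    static int foo_suspend(struct usb_interface *intf, pm_message_t message)
    {
            if (message.event & PM_EVENT_AUTO) {
                    /* Runtime autosuspend: allowed to refuse. */
                    if (device_busy(intf))
                            return -EBUSY;
            }
            /* System suspend/hibernate: must not fail. */
            return 0;
    }
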
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index baa051d..a869b45 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -42,6 +42,7 @@
 #include <linux/mm.h>
 #include <net/ip.h>
 
+#include <xen/xen.h>
 #include <xen/xenbus.h>
 #include <xen/events.h>
 #include <xen/page.h>
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index fdc864f..b1ecefa 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -27,10 +27,10 @@
 	default y
 	help
 	  Say Y here if you want to include support for the deprecated
-	  pci_find_slot() and pci_find_device() APIs.  Most drivers have
-	  been converted over to using the proper hotplug APIs, so this
-	  option serves to include/exclude only a few drivers that are
-	  still using this API.
+	  pci_find_device() API.  Most drivers have been converted over
+	  to using the proper hotplug APIs, so this option serves to
+	  include/exclude only a few drivers that are still using this
+	  API.
 
 config PCI_DEBUG
 	bool "PCI Debugging"
@@ -69,3 +69,10 @@
 	  physical resources.
 
 	  If unsure, say N.
+
+config PCI_IOAPIC
+	bool
+	depends on PCI
+	depends on ACPI
+	depends on HOTPLUG
+	default y
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 4a7f11d..4df48d5 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -14,6 +14,8 @@
 # Build PCI Express stuff if needed
 obj-$(CONFIG_PCIEPORTBUS) += pcie/
 
+obj-$(CONFIG_PCI_IOAPIC) += ioapic.o
+
 obj-$(CONFIG_HOTPLUG) += hotplug.o
 
 # Build the PCI Hotplug drivers if we were asked to
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 416f6ac..6cdc931 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -320,7 +320,7 @@
 	for (bus = dev->bus; bus; bus = bus->parent) {
 		struct pci_dev *bridge = bus->self;
 
-		if (!bridge || !bridge->is_pcie ||
+		if (!bridge || !pci_is_pcie(bridge) ||
 		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
 			return 0;
 
@@ -645,8 +645,11 @@
 			       "x2apic and Intr-remapping.\n");
 #endif
 #ifdef CONFIG_DMAR
-		if (ret && !no_iommu && !iommu_detected && !dmar_disabled)
+		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
 			iommu_detected = 1;
+			/* Make sure ACS will be enabled */
+			pci_request_acs();
+		}
 #endif
 #ifdef CONFIG_X86
 		if (ret)
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index 3625b09..6cd9f3c 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -6,18 +6,22 @@
 obj-$(CONFIG_HOTPLUG_PCI_COMPAQ)	+= cpqphp.o
 obj-$(CONFIG_HOTPLUG_PCI_IBM)		+= ibmphp.o
 
-# pciehp should be linked before acpiphp in order to allow the native driver
-# to attempt to bind first. We can then fall back to generic support.
+# Native drivers should be linked before acpiphp in order to allow
+# them to attempt to bind first.  We can then fall back to
+# generic support.
 
 obj-$(CONFIG_HOTPLUG_PCI_PCIE)		+= pciehp.o
-obj-$(CONFIG_HOTPLUG_PCI_ACPI)		+= acpiphp.o
-obj-$(CONFIG_HOTPLUG_PCI_ACPI_IBM)	+= acpiphp_ibm.o
 obj-$(CONFIG_HOTPLUG_PCI_CPCI_ZT5550)	+= cpcihp_zt5550.o
 obj-$(CONFIG_HOTPLUG_PCI_CPCI_GENERIC)	+= cpcihp_generic.o
 obj-$(CONFIG_HOTPLUG_PCI_SHPC)		+= shpchp.o
 obj-$(CONFIG_HOTPLUG_PCI_RPA)		+= rpaphp.o
 obj-$(CONFIG_HOTPLUG_PCI_RPA_DLPAR)	+= rpadlpar_io.o
 obj-$(CONFIG_HOTPLUG_PCI_SGI)		+= sgi_hotplug.o
+obj-$(CONFIG_HOTPLUG_PCI_ACPI)		+= acpiphp.o
+
+# acpiphp_ibm extends acpiphp, so should be linked afterwards.
+
+obj-$(CONFIG_HOTPLUG_PCI_ACPI_IBM)	+= acpiphp_ibm.o
 
 # Link this last so it doesn't claim devices that have a real hotplug driver
 obj-$(CONFIG_HOTPLUG_PCI_FAKE)		+= fakephp.o
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 0f32571..3c76fc6 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -362,6 +362,8 @@
 		status = acpi_pci_osc_control_set(handle, flags);
 		if (ACPI_SUCCESS(status))
 			goto got_one;
+		if (status == AE_SUPPORT)
+			goto no_control;
 		kfree(string.pointer);
 		string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL };
 	}
@@ -394,10 +396,9 @@
 		if (ACPI_FAILURE(status))
 			break;
 	}
-
+no_control:
 	dbg("Cannot get control of hotplug hardware for pci %s\n",
 	    pci_name(pdev));
-
 	kfree(string.pointer);
 	return -ENODEV;
 got_one:
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 7d938df..bab5204 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -146,12 +146,6 @@
 	struct module *owner;
 };
 
-struct acpiphp_ioapic {
-	struct pci_dev *dev;
-	u32 gsi_base;
-	struct list_head list;
-};
-
 /* PCI bus bridge HID */
 #define ACPI_PCI_HOST_HID		"PNP0A03"
 
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index df1b0ea..8e952fd 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -52,8 +52,6 @@
 #include "acpiphp.h"
 
 static LIST_HEAD(bridge_list);
-static LIST_HEAD(ioapic_list);
-static DEFINE_SPINLOCK(ioapic_list_lock);
 
 #define MY_NAME "acpiphp_glue"
 
@@ -311,17 +309,13 @@
 /* find acpiphp_func from acpiphp_bridge */
 static struct acpiphp_func *acpiphp_bridge_handle_to_function(acpi_handle handle)
 {
-	struct list_head *node, *l;
 	struct acpiphp_bridge *bridge;
 	struct acpiphp_slot *slot;
 	struct acpiphp_func *func;
 
-	list_for_each(node, &bridge_list) {
-		bridge = list_entry(node, struct acpiphp_bridge, list);
+	list_for_each_entry(bridge, &bridge_list, list) {
 		for (slot = bridge->slots; slot; slot = slot->next) {
-			list_for_each(l, &slot->funcs) {
-				func = list_entry(l, struct acpiphp_func,
-							sibling);
+			list_for_each_entry(func, &slot->funcs, sibling) {
 				if (func->handle == handle)
 					return func;
 			}
@@ -495,21 +489,19 @@
 
 static struct acpiphp_bridge *acpiphp_handle_to_bridge(acpi_handle handle)
 {
-	struct list_head *head;
-	list_for_each(head, &bridge_list) {
-		struct acpiphp_bridge *bridge = list_entry(head,
-						struct acpiphp_bridge, list);
+	struct acpiphp_bridge *bridge;
+
+	list_for_each_entry(bridge, &bridge_list, list)
 		if (bridge->handle == handle)
 			return bridge;
-	}
 
 	return NULL;
 }
 
 static void cleanup_bridge(struct acpiphp_bridge *bridge)
 {
-	struct list_head *list, *tmp;
-	struct acpiphp_slot *slot;
+	struct acpiphp_slot *slot, *next;
+	struct acpiphp_func *func, *tmp;
 	acpi_status status;
 	acpi_handle handle = bridge->handle;
 
@@ -530,10 +522,8 @@
 
 	slot = bridge->slots;
 	while (slot) {
-		struct acpiphp_slot *next = slot->next;
-		list_for_each_safe (list, tmp, &slot->funcs) {
-			struct acpiphp_func *func;
-			func = list_entry(list, struct acpiphp_func, sibling);
+		next = slot->next;
+		list_for_each_entry_safe(func, tmp, &slot->funcs, sibling) {
 			if (is_dock_device(func->handle)) {
 				unregister_hotplug_dock_device(func->handle);
 				unregister_dock_notifier(&func->nb);
@@ -545,7 +535,7 @@
 				if (ACPI_FAILURE(status))
 					err("failed to remove notify handler\n");
 			}
-			list_del(list);
+			list_del(&func->sibling);
 			kfree(func);
 		}
 		acpiphp_unregister_hotplug_slot(slot);
@@ -606,204 +596,17 @@
 					   handle_hotplug_event_bridge);
 }
 
-static struct pci_dev * get_apic_pci_info(acpi_handle handle)
-{
-	struct pci_dev *dev;
-
-	dev = acpi_get_pci_dev(handle);
-	if (!dev)
-		return NULL;
-
-	if ((dev->class != PCI_CLASS_SYSTEM_PIC_IOAPIC) &&
-	    (dev->class != PCI_CLASS_SYSTEM_PIC_IOXAPIC))
-	{
-		pci_dev_put(dev);
-		return NULL;
-	}
-
-	return dev;
-}
-
-static int get_gsi_base(acpi_handle handle, u32 *gsi_base)
-{
-	acpi_status status;
-	int result = -1;
-	unsigned long long gsb;
-	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
-	union acpi_object *obj;
-	void *table;
-
-	status = acpi_evaluate_integer(handle, "_GSB", NULL, &gsb);
-	if (ACPI_SUCCESS(status)) {
-		*gsi_base = (u32)gsb;
-		return 0;
-	}
-
-	status = acpi_evaluate_object(handle, "_MAT", NULL, &buffer);
-	if (ACPI_FAILURE(status) || !buffer.length || !buffer.pointer)
-		return -1;
-
-	obj = buffer.pointer;
-	if (obj->type != ACPI_TYPE_BUFFER)
-		goto out;
-
-	table = obj->buffer.pointer;
-	switch (((struct acpi_subtable_header *)table)->type) {
-	case ACPI_MADT_TYPE_IO_SAPIC:
-		*gsi_base = ((struct acpi_madt_io_sapic *)table)->global_irq_base;
-		result = 0;
-		break;
-	case ACPI_MADT_TYPE_IO_APIC:
-		*gsi_base = ((struct acpi_madt_io_apic *)table)->global_irq_base;
-		result = 0;
-		break;
-	default:
-		break;
-	}
- out:
-	kfree(buffer.pointer);
-	return result;
-}
-
-static acpi_status
-ioapic_add(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
-	acpi_status status;
-	unsigned long long sta;
-	acpi_handle tmp;
-	struct pci_dev *pdev;
-	u32 gsi_base;
-	u64 phys_addr;
-	struct acpiphp_ioapic *ioapic;
-
-	/* Evaluate _STA if present */
-	status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
-	if (ACPI_SUCCESS(status) && sta != ACPI_STA_ALL)
-		return AE_CTRL_DEPTH;
-
-	/* Scan only PCI bus scope */
-	status = acpi_get_handle(handle, "_HID", &tmp);
-	if (ACPI_SUCCESS(status))
-		return AE_CTRL_DEPTH;
-
-	if (get_gsi_base(handle, &gsi_base))
-		return AE_OK;
-
-	ioapic = kmalloc(sizeof(*ioapic), GFP_KERNEL);
-	if (!ioapic)
-		return AE_NO_MEMORY;
-
-	pdev = get_apic_pci_info(handle);
-	if (!pdev)
-		goto exit_kfree;
-
-	if (pci_enable_device(pdev))
-		goto exit_pci_dev_put;
-
-	pci_set_master(pdev);
-
-	if (pci_request_region(pdev, 0, "I/O APIC(acpiphp)"))
-		goto exit_pci_disable_device;
-
-	phys_addr = pci_resource_start(pdev, 0);
-	if (acpi_register_ioapic(handle, phys_addr, gsi_base))
-		goto exit_pci_release_region;
-
-	ioapic->gsi_base = gsi_base;
-	ioapic->dev = pdev;
-	spin_lock(&ioapic_list_lock);
-	list_add_tail(&ioapic->list, &ioapic_list);
-	spin_unlock(&ioapic_list_lock);
-
-	return AE_OK;
-
- exit_pci_release_region:
-	pci_release_region(pdev, 0);
- exit_pci_disable_device:
-	pci_disable_device(pdev);
- exit_pci_dev_put:
-	pci_dev_put(pdev);
- exit_kfree:
-	kfree(ioapic);
-
-	return AE_OK;
-}
-
-static acpi_status
-ioapic_remove(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
-	acpi_status status;
-	unsigned long long sta;
-	acpi_handle tmp;
-	u32 gsi_base;
-	struct acpiphp_ioapic *pos, *n, *ioapic = NULL;
-
-	/* Evaluate _STA if present */
-	status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
-	if (ACPI_SUCCESS(status) && sta != ACPI_STA_ALL)
-		return AE_CTRL_DEPTH;
-
-	/* Scan only PCI bus scope */
-	status = acpi_get_handle(handle, "_HID", &tmp);
-	if (ACPI_SUCCESS(status))
-		return AE_CTRL_DEPTH;
-
-	if (get_gsi_base(handle, &gsi_base))
-		return AE_OK;
-
-	acpi_unregister_ioapic(handle, gsi_base);
-
-	spin_lock(&ioapic_list_lock);
-	list_for_each_entry_safe(pos, n, &ioapic_list, list) {
-		if (pos->gsi_base != gsi_base)
-			continue;
-		ioapic = pos;
-		list_del(&ioapic->list);
-		break;
-	}
-	spin_unlock(&ioapic_list_lock);
-
-	if (!ioapic)
-		return AE_OK;
-
-	pci_release_region(ioapic->dev, 0);
-	pci_disable_device(ioapic->dev);
-	pci_dev_put(ioapic->dev);
-	kfree(ioapic);
-
-	return AE_OK;
-}
-
-static int acpiphp_configure_ioapics(acpi_handle handle)
-{
-	ioapic_add(handle, 0, NULL, NULL);
-	acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
-			    ACPI_UINT32_MAX, ioapic_add, NULL, NULL, NULL);
-	return 0;
-}
-
-static int acpiphp_unconfigure_ioapics(acpi_handle handle)
-{
-	ioapic_remove(handle, 0, NULL, NULL);
-	acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
-			    ACPI_UINT32_MAX, ioapic_remove, NULL, NULL, NULL);
-	return 0;
-}
-
 static int power_on_slot(struct acpiphp_slot *slot)
 {
 	acpi_status status;
 	struct acpiphp_func *func;
-	struct list_head *l;
 	int retval = 0;
 
 	/* if already enabled, just skip */
 	if (slot->flags & SLOT_POWEREDON)
 		goto err_exit;
 
-	list_for_each (l, &slot->funcs) {
-		func = list_entry(l, struct acpiphp_func, sibling);
-
+	list_for_each_entry(func, &slot->funcs, sibling) {
 		if (func->flags & FUNC_HAS_PS0) {
 			dbg("%s: executing _PS0\n", __func__);
 			status = acpi_evaluate_object(func->handle, "_PS0", NULL, NULL);
@@ -829,7 +632,6 @@
 {
 	acpi_status status;
 	struct acpiphp_func *func;
-	struct list_head *l;
 
 	int retval = 0;
 
@@ -837,9 +639,7 @@
 	if ((slot->flags & SLOT_POWEREDON) == 0)
 		goto err_exit;
 
-	list_for_each (l, &slot->funcs) {
-		func = list_entry(l, struct acpiphp_func, sibling);
-
+	list_for_each_entry(func, &slot->funcs, sibling) {
 		if (func->flags & FUNC_HAS_PS3) {
 			status = acpi_evaluate_object(func->handle, "_PS3", NULL, NULL);
 			if (ACPI_FAILURE(status)) {
@@ -966,7 +766,6 @@
 {
 	struct pci_dev *dev;
 	struct pci_bus *bus = slot->bridge->pci_bus;
-	struct list_head *l;
 	struct acpiphp_func *func;
 	int retval = 0;
 	int num, max, pass;
@@ -1006,21 +805,16 @@
 		}
 	}
 
-	list_for_each (l, &slot->funcs) {
-		func = list_entry(l, struct acpiphp_func, sibling);
+	list_for_each_entry(func, &slot->funcs, sibling)
 		acpiphp_bus_add(func);
-	}
 
 	pci_bus_assign_resources(bus);
 	acpiphp_sanitize_bus(bus);
 	acpiphp_set_hpp_values(bus);
-	list_for_each_entry(func, &slot->funcs, sibling)
-		acpiphp_configure_ioapics(func->handle);
 	pci_enable_bridges(bus);
 	pci_bus_add_devices(bus);
 
-	list_for_each (l, &slot->funcs) {
-		func = list_entry(l, struct acpiphp_func, sibling);
+	list_for_each_entry(func, &slot->funcs, sibling) {
 		dev = pci_get_slot(bus, PCI_DEVFN(slot->device,
 						  func->function));
 		if (!dev)
@@ -1091,7 +885,6 @@
 	}
 
 	list_for_each_entry(func, &slot->funcs, sibling) {
-		acpiphp_unconfigure_ioapics(func->handle);
 		acpiphp_bus_trim(func->handle);
 	}
 
@@ -1119,12 +912,9 @@
 	acpi_status status;
 	unsigned long long sta = 0;
 	u32 dvid;
-	struct list_head *l;
 	struct acpiphp_func *func;
 
-	list_for_each (l, &slot->funcs) {
-		func = list_entry(l, struct acpiphp_func, sibling);
-
+	list_for_each_entry(func, &slot->funcs, sibling) {
 		if (func->flags & FUNC_HAS_STA) {
 			status = acpi_evaluate_integer(func->handle, "_STA", NULL, &sta);
 			if (ACPI_SUCCESS(status) && sta)
@@ -1152,13 +942,10 @@
 {
 	acpi_status status;
 	struct acpiphp_func *func;
-	struct list_head *l;
 	struct acpi_object_list arg_list;
 	union acpi_object arg;
 
-	list_for_each (l, &slot->funcs) {
-		func = list_entry(l, struct acpiphp_func, sibling);
-
+	list_for_each_entry(func, &slot->funcs, sibling) {
 		/* We don't want to call _EJ0 on non-existing functions. */
 		if ((func->flags & FUNC_HAS_EJ0)) {
 			/* _EJ0 method take one argument */
@@ -1275,7 +1062,6 @@
 	acpiphp_sanitize_bus(bus);
 	acpiphp_set_hpp_values(bus);
 	pci_enable_bridges(bus);
-	acpiphp_configure_ioapics(handle);
 	return 0;
 }
 
@@ -1542,7 +1328,7 @@
 	struct acpiphp_bridge *bridge;
 	int num_slots = 0;
 
-	list_for_each_entry (bridge, &bridge_list, list) {
+	list_for_each_entry(bridge, &bridge_list, list) {
 		dbg("Bus %04x:%02x has %d slot%s\n",
 				pci_domain_nr(bridge->pci_bus),
 				bridge->pci_bus->number, bridge->nr_slots,
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c
index 83f337c..c7084f0 100644
--- a/drivers/pci/hotplug/ibmphp_hpc.c
+++ b/drivers/pci/hotplug/ibmphp_hpc.c
@@ -890,7 +890,7 @@
 			msleep(POLL_INTERVAL_SEC * 1000);
 
 			if (kthread_should_stop())
-				break;
+				goto out_sleep;
 			
 			down (&semOperations);
 			
@@ -904,6 +904,7 @@
 		/* give up the hardware semaphore */
 		up (&semOperations);
 		/* sleep for a short time just for good measure */
+out_sleep:
 		msleep(100);
 	}
 	up (&sem_exit);
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 0325d98..38183a5 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -68,26 +68,26 @@
 static char *pci_bus_speed_strings[] = {
 	"33 MHz PCI",		/* 0x00 */
 	"66 MHz PCI",		/* 0x01 */
-	"66 MHz PCIX", 		/* 0x02 */
-	"100 MHz PCIX",		/* 0x03 */
-	"133 MHz PCIX",		/* 0x04 */
+	"66 MHz PCI-X",		/* 0x02 */
+	"100 MHz PCI-X",	/* 0x03 */
+	"133 MHz PCI-X",	/* 0x04 */
 	NULL,			/* 0x05 */
 	NULL,			/* 0x06 */
 	NULL,			/* 0x07 */
 	NULL,			/* 0x08 */
-	"66 MHz PCIX 266",	/* 0x09 */
-	"100 MHz PCIX 266",	/* 0x0a */
-	"133 MHz PCIX 266",	/* 0x0b */
+	"66 MHz PCI-X 266",	/* 0x09 */
+	"100 MHz PCI-X 266",	/* 0x0a */
+	"133 MHz PCI-X 266",	/* 0x0b */
 	NULL,			/* 0x0c */
 	NULL,			/* 0x0d */
 	NULL,			/* 0x0e */
 	NULL,			/* 0x0f */
 	NULL,			/* 0x10 */
-	"66 MHz PCIX 533",	/* 0x11 */
-	"100 MHz PCIX 533",	/* 0x12 */
-	"133 MHz PCIX 533",	/* 0x13 */
-	"2.5 GT/s PCI-E",	/* 0x14 */
-	"5.0 GT/s PCI-E",	/* 0x15 */
+	"66 MHz PCI-X 533",	/* 0x11 */
+	"100 MHz PCI-X 533",	/* 0x12 */
+	"133 MHz PCI-X 533",	/* 0x13 */
+	"2.5 GT/s PCIe",	/* 0x14 */
+	"5.0 GT/s PCIe",	/* 0x15 */
 };
 
 #ifdef CONFIG_HOTPLUG_PCI_CPCI
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 3070f77..4ed76b4 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -91,7 +91,6 @@
 	struct slot *slot;
 	wait_queue_head_t queue;	/* sleep & wake process */
 	u32 slot_cap;
-	u8 cap_base;
 	struct timer_list poll_timer;
 	unsigned int cmd_busy:1;
 	unsigned int no_cmd_complete:1;
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index 37c8d3d..b09b083 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -87,7 +87,8 @@
 	/* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */
 	if (pciehp_get_hp_hw_control_from_firmware(pdev))
 		return -ENODEV;
-	if (!(pos = pci_find_capability(pdev, PCI_CAP_ID_EXP)))
+	pos = pci_pcie_cap(pdev);
+	if (!pos)
 		return -ENODEV;
 	pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &slot_cap);
 	slot = kzalloc(sizeof(*slot), GFP_KERNEL);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index bc23471..5674b20 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -72,18 +72,6 @@
 static int get_max_bus_speed	(struct hotplug_slot *slot, enum pci_bus_speed *value);
 static int get_cur_bus_speed	(struct hotplug_slot *slot, enum pci_bus_speed *value);
 
-static struct hotplug_slot_ops pciehp_hotplug_slot_ops = {
-	.set_attention_status =	set_attention_status,
-	.enable_slot =		enable_slot,
-	.disable_slot =		disable_slot,
-	.get_power_status =	get_power_status,
-	.get_attention_status =	get_attention_status,
-	.get_latch_status =	get_latch_status,
-	.get_adapter_status =	get_adapter_status,
-  	.get_max_bus_speed =	get_max_bus_speed,
-  	.get_cur_bus_speed =	get_cur_bus_speed,
-};
-
 /**
  * release_slot - free up the memory used by a slot
  * @hotplug_slot: slot to free
@@ -95,6 +83,7 @@
 	ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
 		 __func__, hotplug_slot_name(hotplug_slot));
 
+	kfree(hotplug_slot->ops);
 	kfree(hotplug_slot->info);
 	kfree(hotplug_slot);
 }
@@ -104,6 +93,7 @@
 	struct slot *slot = ctrl->slot;
 	struct hotplug_slot *hotplug = NULL;
 	struct hotplug_slot_info *info = NULL;
+	struct hotplug_slot_ops *ops = NULL;
 	char name[SLOT_NAME_SIZE];
 	int retval = -ENOMEM;
 
@@ -115,11 +105,28 @@
 	if (!info)
 		goto out;
 
+	/* Setup hotplug slot ops */
+	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+	if (!ops)
+		goto out;
+	ops->enable_slot = enable_slot;
+	ops->disable_slot = disable_slot;
+	ops->get_power_status = get_power_status;
+	ops->get_adapter_status = get_adapter_status;
+	ops->get_max_bus_speed = get_max_bus_speed;
+	ops->get_cur_bus_speed = get_cur_bus_speed;
+	if (MRL_SENS(ctrl))
+		ops->get_latch_status = get_latch_status;
+	if (ATTN_LED(ctrl)) {
+		ops->get_attention_status = get_attention_status;
+		ops->set_attention_status = set_attention_status;
+	}
+
 	/* register this slot with the hotplug pci core */
 	hotplug->info = info;
 	hotplug->private = slot;
 	hotplug->release = &release_slot;
-	hotplug->ops = &pciehp_hotplug_slot_ops;
+	hotplug->ops = ops;
 	slot->hotplug_slot = hotplug;
 	snprintf(name, SLOT_NAME_SIZE, "%u", PSN(ctrl));
 
@@ -128,17 +135,12 @@
 		 ctrl->pcie->port->subordinate->number, PSN(ctrl));
 	retval = pci_hp_register(hotplug,
 				 ctrl->pcie->port->subordinate, 0, name);
-	if (retval) {
+	if (retval)
 		ctrl_err(ctrl,
 			 "pci_hp_register failed with error %d\n", retval);
-		goto out;
-	}
-	get_power_status(hotplug, &info->power_status);
-	get_attention_status(hotplug, &info->attention_status);
-	get_latch_status(hotplug, &info->latch_status);
-	get_adapter_status(hotplug, &info->adapter_status);
 out:
 	if (retval) {
+		kfree(ops);
 		kfree(info);
 		kfree(hotplug);
 	}
@@ -160,12 +162,7 @@
 	ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
 		  __func__, slot_name(slot));
 
-	hotplug_slot->info->attention_status = status;
-
-	if (ATTN_LED(slot->ctrl))
-		pciehp_set_attention_status(slot, status);
-
-	return 0;
+	return pciehp_set_attention_status(slot, status);
 }
 
 
@@ -193,92 +190,62 @@
 static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
 {
 	struct slot *slot = hotplug_slot->private;
-	int retval;
 
 	ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
 		  __func__, slot_name(slot));
 
-	retval = pciehp_get_power_status(slot, value);
-	if (retval < 0)
-		*value = hotplug_slot->info->power_status;
-
-	return 0;
+	return pciehp_get_power_status(slot, value);
 }
 
 static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
 {
 	struct slot *slot = hotplug_slot->private;
-	int retval;
 
 	ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
 		  __func__, slot_name(slot));
 
-	retval = pciehp_get_attention_status(slot, value);
-	if (retval < 0)
-		*value = hotplug_slot->info->attention_status;
-
-	return 0;
+	return pciehp_get_attention_status(slot, value);
 }
 
 static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
 {
 	struct slot *slot = hotplug_slot->private;
-	int retval;
 
 	ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
 		 __func__, slot_name(slot));
 
-	retval = pciehp_get_latch_status(slot, value);
-	if (retval < 0)
-		*value = hotplug_slot->info->latch_status;
-
-	return 0;
+	return pciehp_get_latch_status(slot, value);
 }
 
 static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
 {
 	struct slot *slot = hotplug_slot->private;
-	int retval;
 
 	ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
 		 __func__, slot_name(slot));
 
-	retval = pciehp_get_adapter_status(slot, value);
-	if (retval < 0)
-		*value = hotplug_slot->info->adapter_status;
-
-	return 0;
+	return pciehp_get_adapter_status(slot, value);
 }
 
 static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
 				enum pci_bus_speed *value)
 {
 	struct slot *slot = hotplug_slot->private;
-	int retval;
 
 	ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
 		 __func__, slot_name(slot));
 
-	retval = pciehp_get_max_link_speed(slot, value);
-	if (retval < 0)
-		*value = PCI_SPEED_UNKNOWN;
-
-	return 0;
+	return pciehp_get_max_link_speed(slot, value);
 }
 
 static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
 {
 	struct slot *slot = hotplug_slot->private;
-	int retval;
 
 	ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
 		 __func__, slot_name(slot));
 
-	retval = pciehp_get_cur_link_speed(slot, value);
-	if (retval < 0)
-		*value = PCI_SPEED_UNKNOWN;
-
-	return 0;
+	return pciehp_get_cur_link_speed(slot, value);
 }
 
 static int pciehp_probe(struct pcie_device *dev)
@@ -286,14 +253,13 @@
 	int rc;
 	struct controller *ctrl;
 	struct slot *slot;
-	u8 value;
-	struct pci_dev *pdev = dev->port;
+	u8 occupied, poweron;
 
 	if (pciehp_force)
 		dev_info(&dev->device,
 			 "Bypassing BIOS check for pciehp use on %s\n",
-			 pci_name(pdev));
-	else if (pciehp_get_hp_hw_control_from_firmware(pdev))
+			 pci_name(dev->port));
+	else if (pciehp_get_hp_hw_control_from_firmware(dev->port))
 		goto err_out_none;
 
 	ctrl = pcie_init(dev);
@@ -318,23 +284,18 @@
 	rc = pcie_init_notification(ctrl);
 	if (rc) {
 		ctrl_err(ctrl, "Notification initialization failed\n");
-		goto err_out_release_ctlr;
+		goto err_out_free_ctrl_slot;
 	}
 
 	/* Check if slot is occupied */
 	slot = ctrl->slot;
-	pciehp_get_adapter_status(slot, &value);
-	if (value) {
-		if (pciehp_force)
-			pciehp_enable_slot(slot);
-	} else {
-		/* Power off slot if not occupied */
-		if (POWER_CTRL(ctrl)) {
-			rc = pciehp_power_off_slot(slot);
-			if (rc)
-				goto err_out_free_ctrl_slot;
-		}
-	}
+	pciehp_get_adapter_status(slot, &occupied);
+	pciehp_get_power_status(slot, &poweron);
+	if (occupied && pciehp_force)
+		pciehp_enable_slot(slot);
+	/* If the slot is empty but powered on, turn the power off */
+	if (!occupied && poweron && POWER_CTRL(ctrl))
+		pciehp_power_off_slot(slot);
 
 	return 0;
 
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 84487d1..d6ac1b2 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -142,23 +142,9 @@
 
 	/* power fault */
 	ctrl_dbg(ctrl, "Power fault interrupt received\n");
-
-	if (!pciehp_query_power_fault(p_slot)) {
-		/*
-		 * power fault Cleared
-		 */
-		ctrl_info(ctrl, "Power fault cleared on Slot(%s)\n",
-			  slot_name(p_slot));
-		event_type = INT_POWER_FAULT_CLEAR;
-	} else {
-		/*
-		 *   power fault
-		 */
-		ctrl_info(ctrl, "Power fault on Slot(%s)\n", slot_name(p_slot));
-		event_type = INT_POWER_FAULT;
-		ctrl_info(ctrl, "Power fault bit %x set\n", 0);
-	}
-
+	ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot));
+	event_type = INT_POWER_FAULT;
+	ctrl_info(ctrl, "Power fault bit %x set\n", 0);
 	queue_interrupt_event(p_slot, event_type);
 
 	return 1;
@@ -224,13 +210,12 @@
 	retval = pciehp_check_link_status(ctrl);
 	if (retval) {
 		ctrl_err(ctrl, "Failed to check link status\n");
-		set_slot_off(ctrl, p_slot);
-		return retval;
+		goto err_exit;
 	}
 
 	/* Check for a power fault */
-	if (pciehp_query_power_fault(p_slot)) {
-		ctrl_dbg(ctrl, "Power fault detected\n");
+	if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) {
+		ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot));
 		retval = -EIO;
 		goto err_exit;
 	}
@@ -363,25 +348,6 @@
 	mutex_unlock(&p_slot->lock);
 }
 
-static int update_slot_info(struct slot *slot)
-{
-	struct hotplug_slot_info *info;
-	int result;
-
-	info = kmalloc(sizeof(*info), GFP_KERNEL);
-	if (!info)
-		return -ENOMEM;
-
-	pciehp_get_power_status(slot, &info->power_status);
-	pciehp_get_attention_status(slot, &info->attention_status);
-	pciehp_get_latch_status(slot, &info->latch_status);
-	pciehp_get_adapter_status(slot, &info->adapter_status);
-
-	result = pci_hp_change_slot_info(slot->hotplug_slot, info);
-	kfree (info);
-	return result;
-}
-
 /*
  * Note: This function must be called with slot->lock held
  */
@@ -442,7 +408,6 @@
 		 * to hot-add or hot-remove is undergoing
 		 */
 		ctrl_info(ctrl, "Button ignore on Slot(%s)\n", slot_name(p_slot));
-		update_slot_info(p_slot);
 		break;
 	default:
 		ctrl_warn(ctrl, "Not a valid state\n");
@@ -500,11 +465,9 @@
 		if (!HP_SUPR_RM(ctrl))
 			break;
 		ctrl_dbg(ctrl, "Surprise Removal\n");
-		update_slot_info(p_slot);
 		handle_surprise_event(p_slot);
 		break;
 	default:
-		update_slot_info(p_slot);
 		break;
 	}
 	mutex_unlock(&p_slot->lock);
@@ -547,9 +510,6 @@
 	if (rc) {
 		pciehp_get_latch_status(p_slot, &getstatus);
 	}
-
-	update_slot_info(p_slot);
-
 	return rc;
 }
 
@@ -590,10 +550,7 @@
 		}
 	}
 
-	ret = remove_board(p_slot);
-	update_slot_info(p_slot);
-
-	return ret;
+	return remove_board(p_slot);
 }
 
 int pciehp_sysfs_enable_slot(struct slot *p_slot)
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 9ef4605..10040d5 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -45,25 +45,25 @@
 static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value)
 {
 	struct pci_dev *dev = ctrl->pcie->port;
-	return pci_read_config_word(dev, ctrl->cap_base + reg, value);
+	return pci_read_config_word(dev, pci_pcie_cap(dev) + reg, value);
 }
 
 static inline int pciehp_readl(struct controller *ctrl, int reg, u32 *value)
 {
 	struct pci_dev *dev = ctrl->pcie->port;
-	return pci_read_config_dword(dev, ctrl->cap_base + reg, value);
+	return pci_read_config_dword(dev, pci_pcie_cap(dev) + reg, value);
 }
 
 static inline int pciehp_writew(struct controller *ctrl, int reg, u16 value)
 {
 	struct pci_dev *dev = ctrl->pcie->port;
-	return pci_write_config_word(dev, ctrl->cap_base + reg, value);
+	return pci_write_config_word(dev, pci_pcie_cap(dev) + reg, value);
 }
 
 static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
 {
 	struct pci_dev *dev = ctrl->pcie->port;
-	return pci_write_config_dword(dev, ctrl->cap_base + reg, value);
+	return pci_write_config_dword(dev, pci_pcie_cap(dev) + reg, value);
 }
 
 /* Power Control Command */
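
pci_pcie_cap() returns the PCI Express capability offset that was
cached when the device was enumerated, so the accessors above no
longer keep a private cap_base copy or re-walk config space on every
access. Converting an open-coded lookup is mechanical (a sketch):

    u16 sltsta;
    int pos;

    /* Before: pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); */
    pos = pci_pcie_cap(pdev);
    if (!pos)
            return -ENODEV;         /* not a PCI Express device */
    pci_read_config_word(pdev, pos + PCI_EXP_SLTSTA, &sltsta);
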
@@ -318,8 +318,8 @@
 		return retval;
 	}
 
-	ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n",
-		 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_ctrl);
+	ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__,
+		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
 
 	atten_led_state = (slot_ctrl & PCI_EXP_SLTCTL_AIC) >> 6;
 
@@ -356,8 +356,8 @@
 		ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
 		return retval;
 	}
-	ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n",
-		 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_ctrl);
+	ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__,
+		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
 
 	pwr_state = (slot_ctrl & PCI_EXP_SLTCTL_PCC) >> 10;
 
@@ -427,27 +427,24 @@
 	struct controller *ctrl = slot->ctrl;
 	u16 slot_cmd;
 	u16 cmd_mask;
-	int rc;
 
 	cmd_mask = PCI_EXP_SLTCTL_AIC;
 	switch (value) {
-		case 0 :	/* turn off */
-			slot_cmd = 0x00C0;
-			break;
-		case 1:		/* turn on */
-			slot_cmd = 0x0040;
-			break;
-		case 2:		/* turn blink */
-			slot_cmd = 0x0080;
-			break;
-		default:
-			return -1;
+	case 0:		/* turn off */
+		slot_cmd = 0x00C0;
+		break;
+	case 1:		/* turn on */
+		slot_cmd = 0x0040;
+		break;
+	case 2:		/* turn blink */
+		slot_cmd = 0x0080;
+		break;
+	default:
+		return -EINVAL;
 	}
-	rc = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
-	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
-		 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
-
-	return rc;
+	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
+	return pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
 }
 
 void pciehp_green_led_on(struct slot *slot)
@@ -459,8 +456,8 @@
 	slot_cmd = 0x0100;
 	cmd_mask = PCI_EXP_SLTCTL_PIC;
 	pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
-	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
-		 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
+	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
 }
 
 void pciehp_green_led_off(struct slot *slot)
@@ -472,8 +469,8 @@
 	slot_cmd = 0x0300;
 	cmd_mask = PCI_EXP_SLTCTL_PIC;
 	pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
-	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
-		 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
+	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
 }
 
 void pciehp_green_led_blink(struct slot *slot)
@@ -485,8 +482,8 @@
 	slot_cmd = 0x0200;
 	cmd_mask = PCI_EXP_SLTCTL_PIC;
 	pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
-	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
-		 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
+	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
 }
 
 int pciehp_power_on_slot(struct slot * slot)
@@ -514,97 +511,38 @@
 			return retval;
 		}
 	}
+	ctrl->power_fault_detected = 0;
 
 	slot_cmd = POWER_ON;
 	cmd_mask = PCI_EXP_SLTCTL_PCC;
-	if (!pciehp_poll_mode) {
-		/* Enable power fault detection turned off at power off time */
-		slot_cmd |= PCI_EXP_SLTCTL_PFDE;
-		cmd_mask |= PCI_EXP_SLTCTL_PFDE;
-	}
-
 	retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
 	if (retval) {
 		ctrl_err(ctrl, "Write %x command failed!\n", slot_cmd);
 		return retval;
 	}
-	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
-		 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
+	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
 
-	ctrl->power_fault_detected = 0;
 	return retval;
 }
 
-static inline int pcie_mask_bad_dllp(struct controller *ctrl)
-{
-	struct pci_dev *dev = ctrl->pcie->port;
-	int pos;
-	u32 reg;
-
-	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
-	if (!pos)
-		return 0;
-	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg);
-	if (reg & PCI_ERR_COR_BAD_DLLP)
-		return 0;
-	reg |= PCI_ERR_COR_BAD_DLLP;
-	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg);
-	return 1;
-}
-
-static inline void pcie_unmask_bad_dllp(struct controller *ctrl)
-{
-	struct pci_dev *dev = ctrl->pcie->port;
-	u32 reg;
-	int pos;
-
-	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
-	if (!pos)
-		return;
-	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg);
-	if (!(reg & PCI_ERR_COR_BAD_DLLP))
-		return;
-	reg &= ~PCI_ERR_COR_BAD_DLLP;
-	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg);
-}
-
 int pciehp_power_off_slot(struct slot * slot)
 {
 	struct controller *ctrl = slot->ctrl;
 	u16 slot_cmd;
 	u16 cmd_mask;
-	int retval = 0;
-	int changed;
-
-	/*
-	 * Set Bad DLLP Mask bit in Correctable Error Mask
-	 * Register. This is the workaround against Bad DLLP error
-	 * that sometimes happens during turning power off the slot
-	 * which conforms to PCI Express 1.0a spec.
-	 */
-	changed = pcie_mask_bad_dllp(ctrl);
+	int retval;
 
 	slot_cmd = POWER_OFF;
 	cmd_mask = PCI_EXP_SLTCTL_PCC;
-	if (!pciehp_poll_mode) {
-		/* Disable power fault detection */
-		slot_cmd &= ~PCI_EXP_SLTCTL_PFDE;
-		cmd_mask |= PCI_EXP_SLTCTL_PFDE;
-	}
-
 	retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
 	if (retval) {
 		ctrl_err(ctrl, "Write command failed!\n");
-		retval = -1;
-		goto out;
+		return retval;
 	}
-	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
-		 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
- out:
-	if (changed)
-		pcie_unmask_bad_dllp(ctrl);
-
-	return retval;
+	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
+	return 0;
 }
 
 static irqreturn_t pcie_isr(int irq, void *dev_id)
@@ -840,11 +778,19 @@
 {
 	u16 cmd, mask;
 
+	/*
+	 * TBD: Power fault detected software notification support.
+	 *
+	 * Power fault detected software notification is not enabled
+	 * for now, because it caused a power fault detected interrupt
+	 * storm on some machines: the power fault detected bit in the
+	 * slot status register was set again immediately after being
+	 * cleared in the interrupt service routine, so the next power
+	 * fault detected interrupt fired right away.
+	 */
 	cmd = PCI_EXP_SLTCTL_PDCE;
 	if (ATTN_BUTTN(ctrl))
 		cmd |= PCI_EXP_SLTCTL_ABPE;
-	if (POWER_CTRL(ctrl))
-		cmd |= PCI_EXP_SLTCTL_PFDE;
 	if (MRL_SENS(ctrl))
 		cmd |= PCI_EXP_SLTCTL_MRLSCE;
 	if (!pciehp_poll_mode)
@@ -866,7 +812,8 @@
 	u16 mask;
 	mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
 		PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
-		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE);
+		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
+		PCI_EXP_SLTCTL_DLLSCE);
 	if (pcie_write_cmd(ctrl, 0, mask))
 		ctrl_warn(ctrl, "Cannot disable software notification\n");
 }
@@ -934,7 +881,8 @@
 		  pdev->subsystem_device);
 	ctrl_info(ctrl, "  Subsystem Vendor ID  : 0x%04x\n",
 		  pdev->subsystem_vendor);
-	ctrl_info(ctrl, "  PCIe Cap offset      : 0x%02x\n", ctrl->cap_base);
+	ctrl_info(ctrl, "  PCIe Cap offset      : 0x%02x\n",
+		  pci_pcie_cap(pdev));
 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
 		if (!pci_resource_len(pdev, i))
 			continue;
@@ -978,8 +926,7 @@
 		goto abort;
 	}
 	ctrl->pcie = dev;
-	ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-	if (!ctrl->cap_base) {
+	if (!pci_pcie_cap(pdev)) {
 		ctrl_err(ctrl, "Cannot find PCI Express capability\n");
 		goto abort_ctrl;
 	}
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c
index cc8ec3a..80b461c 100644
--- a/drivers/pci/hotplug/pcihp_slot.c
+++ b/drivers/pci/hotplug/pcihp_slot.c
@@ -43,7 +43,7 @@
 		 * Perhaps we *should* use default settings for PCIe, but
 		 * pciehp didn't, so we won't either.
 		 */
-		if (dev->is_pcie)
+		if (pci_is_pcie(dev))
 			return;
 		dev_info(&dev->dev, "using default PCI settings\n");
 		hpp = &pci_default_type0;
@@ -102,7 +102,7 @@
 		return;
 
 	/* Find PCI Express capability */
-	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+	pos = pci_pcie_cap(dev);
 	if (!pos)
 		return;
 
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 9261327..8d61594 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1611,7 +1611,7 @@
 			return ret;
 		parent = parent->bus->self;
 	}
-	if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
+	if (pci_is_pcie(tmp)) /* this is a PCIE-to-PCI bridge */
 		return domain_context_mapping_one(domain,
 					pci_domain_nr(tmp->subordinate),
 					tmp->subordinate->number, 0,
@@ -1651,7 +1651,7 @@
 			return ret;
 		parent = parent->bus->self;
 	}
-	if (tmp->is_pcie)
+	if (pci_is_pcie(tmp))
 		return device_context_mapped(iommu, tmp->subordinate->number,
 					     0);
 	else
@@ -1821,7 +1821,7 @@
 
 	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
 	if (dev_tmp) {
-		if (dev_tmp->is_pcie) {
+		if (pci_is_pcie(dev_tmp)) {
 			bus = dev_tmp->subordinate->number;
 			devfn = 0;
 		} else {
@@ -2182,7 +2182,7 @@
 	 * the 1:1 domain, just in _case_ one of their siblings turns out
 	 * not to be able to map all of memory.
 	 */
-	if (!pdev->is_pcie) {
+	if (!pci_is_pcie(pdev)) {
 		if (!pci_is_root_bus(pdev->bus))
 			return 0;
 		if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
@@ -3319,7 +3319,7 @@
 					 parent->devfn);
 			parent = parent->bus->self;
 		}
-		if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
+		if (pci_is_pcie(tmp)) /* this is a PCIE-to-PCI bridge */
 			iommu_detach_dev(iommu,
 				tmp->subordinate->number, 0);
 		else /* this is a legacy PCI bridge */
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 3b36586..1487bf2 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -520,7 +520,7 @@
 		return -1;
 
 	/* PCIe device or Root Complex integrated PCI device */
-	if (dev->is_pcie || !dev->bus->parent) {
+	if (pci_is_pcie(dev) || !dev->bus->parent) {
 		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
 			     (dev->bus->number << 8) | dev->devfn);
 		return 0;
@@ -528,7 +528,7 @@
 
 	bridge = pci_find_upstream_pcie_bridge(dev);
 	if (bridge) {
-		if (bridge->is_pcie) /* this is a PCIE-to-PCI/PCIX bridge */
+		if (pci_is_pcie(bridge)) /* PCIE-to-PCI/PCIX bridge */
 			set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
 				(bridge->bus->number << 8) | dev->bus->number);
 		else /* this is a legacy PCI bridge */
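
The SID programmed into the IRTE above is the standard PCI requester ID: bus number in the high byte, devfn in the low byte. A worked example with an arbitrary device address:

	/* Device 0000:03:00.1: devfn = PCI_DEVFN(0x00, 1) = 0x01,
	 * so sid = (0x03 << 8) | 0x01 = 0x0301. */
	u16 sid = (dev->bus->number << 8) | dev->devfn;
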
diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c
new file mode 100644
index 0000000..3e0d7b5
--- /dev/null
+++ b/drivers/pci/ioapic.c
@@ -0,0 +1,127 @@
+/*
+ * IOAPIC/IOxAPIC/IOSAPIC driver
+ *
+ * Copyright (C) 2009 Fujitsu Limited.
+ * (c) Copyright 2009 Hewlett-Packard Development Company, L.P.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This driver manages PCI I/O APICs added by hotplug after boot.  We try to
+ * claim all I/O APIC PCI devices, but those present at boot were registered
+ * when we parsed the ACPI MADT, so we'll fail when we try to re-register
+ * them.
+ */
+
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+
+struct ioapic {
+	acpi_handle	handle;
+	u32		gsi_base;
+};
+
+static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent)
+{
+	acpi_handle handle;
+	acpi_status status;
+	unsigned long long gsb;
+	struct ioapic *ioapic;
+	u64 addr;
+	int ret;
+	char *type;
+
+	handle = DEVICE_ACPI_HANDLE(&dev->dev);
+	if (!handle)
+		return -EINVAL;
+
+	status = acpi_evaluate_integer(handle, "_GSB", NULL, &gsb);
+	if (ACPI_FAILURE(status))
+		return -EINVAL;
+
+	/*
+	 * The previous code in acpiphp evaluated _MAT if _GSB failed, but
+	 * ACPI spec 4.0 sec 6.2.2 requires _GSB for hot-pluggable I/O APICs.
+	 */
+
+	ioapic = kzalloc(sizeof(*ioapic), GFP_KERNEL);
+	if (!ioapic)
+		return -ENOMEM;
+
+	ioapic->handle = handle;
+	ioapic->gsi_base = (u32) gsb;
+
+	if (dev->class == PCI_CLASS_SYSTEM_PIC_IOAPIC)
+		type = "IOAPIC";
+	else
+		type = "IOxAPIC";
+
+	ret = pci_enable_device(dev);
+	if (ret < 0)
+		goto exit_free;
+
+	pci_set_master(dev);
+
+	if (pci_request_region(dev, 0, type))
+		goto exit_disable;
+
+	addr = pci_resource_start(dev, 0);
+	if (acpi_register_ioapic(ioapic->handle, addr, ioapic->gsi_base))
+		goto exit_release;
+
+	pci_set_drvdata(dev, ioapic);
+	dev_info(&dev->dev, "%s at %#llx, GSI %u\n", type, addr,
+		 ioapic->gsi_base);
+	return 0;
+
+exit_release:
+	pci_release_region(dev, 0);
+exit_disable:
+	pci_disable_device(dev);
+exit_free:
+	kfree(ioapic);
+	return -ENODEV;
+}
+
+static void ioapic_remove(struct pci_dev *dev)
+{
+	struct ioapic *ioapic = pci_get_drvdata(dev);
+
+	acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base);
+	pci_release_region(dev, 0);
+	pci_disable_device(dev);
+	kfree(ioapic);
+}
+
+
+static struct pci_device_id ioapic_devices[] = {
+	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+	  PCI_CLASS_SYSTEM_PIC_IOAPIC, ~0, },
+	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+	  PCI_CLASS_SYSTEM_PIC_IOXAPIC, ~0, },
+	{ }
+};
+
+static struct pci_driver ioapic_driver = {
+	.name		= "ioapic",
+	.id_table	= ioapic_devices,
+	.probe		= ioapic_probe,
+	.remove		= __devexit_p(ioapic_remove),
+};
+
+static int __init ioapic_init(void)
+{
+	return pci_register_driver(&ioapic_driver);
+}
+
+static void __exit ioapic_exit(void)
+{
+	pci_unregister_driver(&ioapic_driver);
+}
+
+module_init(ioapic_init);
+module_exit(ioapic_exit);
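
A note on the class matching in the id table above: dev->class holds the 24-bit (base class, sub-class, prog-if) triple read from config space, and the core matches entries with !((id->class ^ dev->class) & id->class_mask) (a sketch of pci_match_one_device(), quoted from memory), so the table stores the class unshifted with a full mask:

	/* An IOAPIC reports class 0x080010 (base 0x08, sub 0x00,
	 * prog-if 0x10): the same 24-bit value the table entry stores
	 * and ioapic_probe() re-checks. */
	if (!((id->class ^ dev->class) & id->class_mask))
		return id;	/* matched */
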
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index e03fe98..b2a448e 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -555,7 +555,7 @@
 {
 	int pos;
 
-	if (!dev->is_pcie)
+	if (!pci_is_pcie(dev))
 		return -ENODEV;
 
 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 33317df..cc617dd 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -116,7 +116,7 @@
 		int ret;
 
 		ret = acpi_pm_device_sleep_wake(&bridge->dev, enable);
-		if (!ret || bridge->is_pcie)
+		if (!ret || pci_is_pcie(bridge))
 			return;
 		bus = bus->parent;
 	}
@@ -131,7 +131,7 @@
 	if (acpi_pci_can_wakeup(dev))
 		return acpi_pm_device_sleep_wake(&dev->dev, enable);
 
-	if (!dev->is_pcie)
+	if (!pci_is_pcie(dev))
 		acpi_pci_propagate_wakeup_enable(dev->bus, enable);
 
 	return 0;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 0f6382f..c5df94e 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -74,7 +74,11 @@
 	const struct cpumask *mask;
 	int len;
 
+#ifdef CONFIG_NUMA
+	mask = cpumask_of_node(dev_to_node(dev));
+#else
 	mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
+#endif
 	len = cpumask_scnprintf(buf, PAGE_SIZE-2, mask);
 	buf[len++] = '\n';
 	buf[len] = '\0';
@@ -88,7 +92,11 @@
 	const struct cpumask *mask;
 	int len;
 
+#ifdef CONFIG_NUMA
+	mask = cpumask_of_node(dev_to_node(dev));
+#else
 	mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
+#endif
 	len = cpulist_scnprintf(buf, PAGE_SIZE-2, mask);
 	buf[len++] = '\n';
 	buf[len] = '\0';
@@ -176,6 +184,21 @@
 #endif
 
 static ssize_t
+dma_mask_bits_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	return sprintf(buf, "%d\n", fls64(pdev->dma_mask));
+}
+
+static ssize_t
+consistent_dma_mask_bits_show(struct device *dev, struct device_attribute *attr,
+				 char *buf)
+{
+	return sprintf(buf, "%d\n", fls64(dev->coherent_dma_mask));
+}
+
+static ssize_t
 msi_bus_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -306,6 +329,8 @@
 #ifdef CONFIG_NUMA
 	__ATTR_RO(numa_node),
 #endif
+	__ATTR_RO(dma_mask_bits),
+	__ATTR_RO(consistent_dma_mask_bits),
 	__ATTR(enable, 0600, is_enabled_show, is_enabled_store),
 	__ATTR(broken_parity_status,(S_IRUGO|S_IWUSR),
 		broken_parity_status_show,broken_parity_status_store),
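
The new dma_mask_bits and consistent_dma_mask_bits attributes report the mask width via fls64(), the index of the most significant set bit. A worked example (values illustrative):

	/* DMA_BIT_MASK(32) == 0x00000000ffffffffULL, so fls64() == 32
	 * and a device limited to 32-bit DMA reads back as "32". */
	u64 mask = DMA_BIT_MASK(32);
	int bits = fls64(mask);	/* 32 */
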
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 4e4c295..0bc27e0 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -47,6 +47,15 @@
 unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
 unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
 
+/*
+ * The default CLS is used if the arch hasn't set CLS explicitly and
+ * not all PCI devices agree on the same value.  The arch can override
+ * either the default or the actual value as it sees fit.  Don't
+ * forget this is measured in 32-bit words, not bytes.
+ */
+u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
+u8 pci_cache_line_size;
+
 /**
  * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
  * @bus: pointer to PCI bus structure to search
@@ -373,8 +382,12 @@
 			continue;	/* Wrong type */
 		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
 			return r;	/* Exact match */
-		if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
-			best = r;	/* Approximating prefetchable by non-prefetchable */
+		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
+		if (r->flags & IORESOURCE_PREFETCH)
+			continue;
+		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
+		if (!best)
+			best = r;
 	}
 	return best;
 }
@@ -728,8 +741,8 @@
 	u16 *cap;
 	u16 flags;
 
-	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
-	if (pos <= 0)
+	pos = pci_pcie_cap(dev);
+	if (!pos)
 		return 0;
 
 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
@@ -837,7 +850,7 @@
 	int i;
 	/* XXX: 100% dword access ok here? */
 	for (i = 0; i < 16; i++)
-		pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]);
+		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
 	dev->state_saved = true;
 	if ((i = pci_save_pcie_state(dev)) != 0)
 		return i;
@@ -1202,7 +1215,7 @@
 
 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
 
-	dev_printk(KERN_INFO, &dev->dev, "PME# %s\n",
+	dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
 			enable ? "enabled" : "disabled");
 }
 
@@ -1413,7 +1426,8 @@
 
 	pmc &= PCI_PM_CAP_PME_MASK;
 	if (pmc) {
-		dev_info(&dev->dev, "PME# supported from%s%s%s%s%s\n",
+		dev_printk(KERN_DEBUG, &dev->dev,
+			 "PME# supported from%s%s%s%s%s\n",
 			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
 			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
 			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
@@ -1510,7 +1524,7 @@
 	u16 ctrl;
 	struct pci_dev *bridge;
 
-	if (!dev->is_pcie || dev->devfn)
+	if (!pci_is_pcie(dev) || dev->devfn)
 		return;
 
 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
@@ -1518,10 +1532,10 @@
 		return;
 
 	bridge = dev->bus->self;
-	if (!bridge || !bridge->is_pcie)
+	if (!bridge || !pci_is_pcie(bridge))
 		return;
 
-	pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
+	pos = pci_pcie_cap(bridge);
 	if (!pos)
 		return;
 
@@ -1536,6 +1550,54 @@
 	bridge->ari_enabled = 1;
 }
 
+static int pci_acs_enable;
+
+/**
+ * pci_request_acs - ask for ACS to be enabled if supported
+ */
+void pci_request_acs(void)
+{
+	pci_acs_enable = 1;
+}
+
+/**
+ * pci_enable_acs - enable ACS if hardware supports it
+ * @dev: the PCI device
+ */
+void pci_enable_acs(struct pci_dev *dev)
+{
+	int pos;
+	u16 cap;
+	u16 ctrl;
+
+	if (!pci_acs_enable)
+		return;
+
+	if (!pci_is_pcie(dev))
+		return;
+
+	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
+	if (!pos)
+		return;
+
+	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
+	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
+
+	/* Source Validation */
+	ctrl |= (cap & PCI_ACS_SV);
+
+	/* P2P Request Redirect */
+	ctrl |= (cap & PCI_ACS_RR);
+
+	/* P2P Completion Redirect */
+	ctrl |= (cap & PCI_ACS_CR);
+
+	/* Upstream Forwarding */
+	ctrl |= (cap & PCI_ACS_UF);
+
+	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
+}
+
 /**
  * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
  * @dev: the PCI device
@@ -1669,9 +1731,7 @@
 	return 0;
 
 err_out:
-	dev_warn(&pdev->dev, "BAR %d: can't reserve %s region %pR\n",
-		 bar,
-		 pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
+	dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
 		 &pdev->resource[bar]);
 	return -EBUSY;
 }
@@ -1866,31 +1926,6 @@
 	__pci_set_master(dev, false);
 }
 
-#ifdef PCI_DISABLE_MWI
-int pci_set_mwi(struct pci_dev *dev)
-{
-	return 0;
-}
-
-int pci_try_set_mwi(struct pci_dev *dev)
-{
-	return 0;
-}
-
-void pci_clear_mwi(struct pci_dev *dev)
-{
-}
-
-#else
-
-#ifndef PCI_CACHE_LINE_BYTES
-#define PCI_CACHE_LINE_BYTES L1_CACHE_BYTES
-#endif
-
-/* This can be overridden by arch code. */
-/* Don't forget this is measured in 32-bit words, not bytes */
-u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;
-
 /**
  * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
  * @dev: the PCI device for which MWI is to be enabled
@@ -1901,13 +1936,12 @@
  *
  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
  */
-static int
-pci_set_cacheline_size(struct pci_dev *dev)
+int pci_set_cacheline_size(struct pci_dev *dev)
 {
 	u8 cacheline_size;
 
 	if (!pci_cache_line_size)
-		return -EINVAL;		/* The system doesn't support MWI. */
+		return -EINVAL;
 
 	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
 	   equal to or multiple of the right value. */
@@ -1928,6 +1962,24 @@
 
 	return -EINVAL;
 }
+EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
+
+#ifdef PCI_DISABLE_MWI
+int pci_set_mwi(struct pci_dev *dev)
+{
+	return 0;
+}
+
+int pci_try_set_mwi(struct pci_dev *dev)
+{
+	return 0;
+}
+
+void pci_clear_mwi(struct pci_dev *dev)
+{
+}
+
+#else
 
 /**
  * pci_set_mwi - enables memory-write-invalidate PCI transaction
@@ -2062,6 +2114,7 @@
 		return -EIO;
 
 	dev->dma_mask = mask;
+	dev_dbg(&dev->dev, "using %d-bit DMA mask\n", fls64(mask));
 
 	return 0;
 }
@@ -2073,6 +2126,7 @@
 		return -EIO;
 
 	dev->dev.coherent_dma_mask = mask;
+	dev_dbg(&dev->dev, "using %d-bit consistent DMA mask\n", fls64(mask));
 
 	return 0;
 }
@@ -2099,9 +2153,9 @@
 	int i;
 	int pos;
 	u32 cap;
-	u16 status;
+	u16 status, control;
 
-	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+	pos = pci_pcie_cap(dev);
 	if (!pos)
 		return -ENOTTY;
 
@@ -2126,8 +2180,10 @@
 			"proceeding with reset anyway\n");
 
 clear:
-	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
-				PCI_EXP_DEVCTL_BCR_FLR);
+	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
+	control |= PCI_EXP_DEVCTL_BCR_FLR;
+	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
+
 	msleep(100);
 
 	return 0;
@@ -2450,7 +2506,7 @@
 	int ret, cap;
 	u16 ctl;
 
-	cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
+	cap = pci_pcie_cap(dev);
 	if (!cap)
 		return -EINVAL;
 
@@ -2480,7 +2536,7 @@
 
 	v = (ffs(rq) - 8) << 12;
 
-	cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
+	cap = pci_pcie_cap(dev);
 	if (!cap)
 		goto out;
 
@@ -2540,7 +2596,7 @@
 			return reg;
 	}
 
-	dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno);
+	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
 	return 0;
 }
 
@@ -2590,7 +2646,7 @@
 
 #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
 static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
-spinlock_t resource_alignment_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(resource_alignment_lock);
 
 /**
  * pci_specified_resource_alignment - get resource alignment specified by user.
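
pci_request_acs() is a one-way latch: a caller that needs peer-to-peer isolation sets it early, and pci_enable_acs() then turns on Source Validation, the Request/Completion Redirects and Upstream Forwarding on each ACS-capable PCIe device as it is discovered. A hedged sketch of a caller (the function name is hypothetical):

	/* e.g. from an IOMMU init path that runs before PCI enumeration */
	static int __init my_iommu_init(void)
	{
		pci_request_acs();	/* pci_enable_acs() will act during probe */
		return 0;
	}
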
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d92d195..33ed8e0 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -311,4 +311,6 @@
 	return resource_alignment(res);
 }
 
+extern void pci_enable_acs(struct pci_dev *dev);
+
 #endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index 62d15f6..7fcd5331 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -23,6 +23,7 @@
 #include <linux/pci.h>
 #include <linux/fs.h>
 #include <linux/uaccess.h>
+#include <linux/stddef.h>
 #include "aerdrv.h"
 
 struct aer_error_inj {
@@ -35,10 +36,12 @@
 	u32 header_log1;
 	u32 header_log2;
 	u32 header_log3;
+	u16 domain;
 };
 
 struct aer_error {
 	struct list_head list;
+	u16 domain;
 	unsigned int bus;
 	unsigned int devfn;
 	int pos_cap_err;
@@ -66,22 +69,27 @@
 /* Protect einjected and pci_bus_ops_list */
 static DEFINE_SPINLOCK(inject_lock);
 
-static void aer_error_init(struct aer_error *err, unsigned int bus,
-			   unsigned int devfn, int pos_cap_err)
+static void aer_error_init(struct aer_error *err, u16 domain,
+			   unsigned int bus, unsigned int devfn,
+			   int pos_cap_err)
 {
 	INIT_LIST_HEAD(&err->list);
+	err->domain = domain;
 	err->bus = bus;
 	err->devfn = devfn;
 	err->pos_cap_err = pos_cap_err;
 }
 
 /* inject_lock must be held before calling */
-static struct aer_error *__find_aer_error(unsigned int bus, unsigned int devfn)
+static struct aer_error *__find_aer_error(u16 domain, unsigned int bus,
+					  unsigned int devfn)
 {
 	struct aer_error *err;
 
 	list_for_each_entry(err, &einjected, list) {
-		if (bus == err->bus && devfn == err->devfn)
+		if (domain == err->domain &&
+		    bus == err->bus &&
+		    devfn == err->devfn)
 			return err;
 	}
 	return NULL;
@@ -90,7 +98,10 @@
 /* inject_lock must be held before calling */
 static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev)
 {
-	return __find_aer_error(dev->bus->number, dev->devfn);
+	int domain = pci_domain_nr(dev->bus);
+	if (domain < 0)
+		return NULL;
+	return __find_aer_error((u16)domain, dev->bus->number, dev->devfn);
 }
 
 /* inject_lock must be held before calling */
@@ -172,11 +183,15 @@
 	struct aer_error *err;
 	unsigned long flags;
 	struct pci_ops *ops;
+	int domain;
 
 	spin_lock_irqsave(&inject_lock, flags);
 	if (size != sizeof(u32))
 		goto out;
-	err = __find_aer_error(bus->number, devfn);
+	domain = pci_domain_nr(bus);
+	if (domain < 0)
+		goto out;
+	err = __find_aer_error((u16)domain, bus->number, devfn);
 	if (!err)
 		goto out;
 
@@ -200,11 +215,15 @@
 	unsigned long flags;
 	int rw1cs;
 	struct pci_ops *ops;
+	int domain;
 
 	spin_lock_irqsave(&inject_lock, flags);
 	if (size != sizeof(u32))
 		goto out;
-	err = __find_aer_error(bus->number, devfn);
+	domain = pci_domain_nr(bus);
+	if (domain < 0)
+		goto out;
+	err = __find_aer_error((u16)domain, bus->number, devfn);
 	if (!err)
 		goto out;
 
@@ -262,7 +281,7 @@
 static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
 {
 	while (1) {
-		if (!dev->is_pcie)
+		if (!pci_is_pcie(dev))
 			break;
 		if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
 			return dev;
@@ -305,25 +324,25 @@
 	u32 sever;
 	int ret = 0;
 
-	dev = pci_get_bus_and_slot(einj->bus, devfn);
+	dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn);
 	if (!dev)
-		return -EINVAL;
+		return -ENODEV;
 	rpdev = pcie_find_root_port(dev);
 	if (!rpdev) {
-		ret = -EINVAL;
+		ret = -ENOTTY;
 		goto out_put;
 	}
 
 	pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
 	if (!pos_cap_err) {
-		ret = -EIO;
+		ret = -ENOTTY;
 		goto out_put;
 	}
 	pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever);
 
 	rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR);
 	if (!rp_pos_cap_err) {
-		ret = -EIO;
+		ret = -ENOTTY;
 		goto out_put;
 	}
 
@@ -344,7 +363,8 @@
 	if (!err) {
 		err = err_alloc;
 		err_alloc = NULL;
-		aer_error_init(err, einj->bus, devfn, pos_cap_err);
+		aer_error_init(err, einj->domain, einj->bus, devfn,
+			       pos_cap_err);
 		list_add(&err->list, &einjected);
 	}
 	err->uncor_status |= einj->uncor_status;
@@ -358,7 +378,8 @@
 	if (!rperr) {
 		rperr = rperr_alloc;
 		rperr_alloc = NULL;
-		aer_error_init(rperr, rpdev->bus->number, rpdev->devfn,
+		aer_error_init(rperr, pci_domain_nr(rpdev->bus),
+			       rpdev->bus->number, rpdev->devfn,
 			       rp_pos_cap_err);
 		list_add(&rperr->list, &einjected);
 	}
@@ -411,10 +432,11 @@
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
-
-	if (usize != sizeof(struct aer_error_inj))
+	if (usize < offsetof(struct aer_error_inj, domain) ||
+	    usize > sizeof(einj))
 		return -EINVAL;
 
+	memset(&einj, 0, sizeof(einj));
 	if (copy_from_user(&einj, ubuf, usize))
 		return -EFAULT;
 
@@ -452,7 +474,7 @@
 	}
 
 	spin_lock_irqsave(&inject_lock, flags);
-	list_for_each_entry_safe(err, err_next, &pci_bus_ops_list, list) {
+	list_for_each_entry_safe(err, err_next, &einjected, list) {
 		list_del(&err->list);
 		kfree(err);
 	}
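
The rewritten size check above keeps the inject interface backward compatible: a userspace tool built against the old struct layout (without the trailing domain field) is still accepted, and the memset() zero-fills whatever the caller did not supply. In effect:

	/* usize == offsetof(struct aer_error_inj, domain): old layout,
	 * domain defaults to 0; usize == sizeof(einj): new layout;
	 * anything outside that window is rejected with -EINVAL. */
	if (usize < offsetof(struct aer_error_inj, domain) ||
	    usize > sizeof(einj))
		return -EINVAL;
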
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 40c3cc5..97a3459 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -53,7 +53,7 @@
 
 static struct pcie_port_service_driver aerdriver = {
 	.name		= "aer",
-	.port_type	= PCIE_RC_PORT,
+	.port_type	= PCI_EXP_TYPE_ROOT_PORT,
 	.service	= PCIE_PORT_SERVICE_AER,
 
 	.probe		= aer_probe,
@@ -295,7 +295,7 @@
 	u16 reg16;
 
 	/* Clean up Root device status */
-	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+	pos = pci_pcie_cap(dev);
 	pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &reg16);
 	pci_write_config_word(dev, pos + PCI_EXP_DEVSTA, reg16);
 
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 9f5ccbe..ae672ca 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -35,11 +35,14 @@
 	u16 reg16 = 0;
 	int pos;
 
+	if (dev->aer_firmware_first)
+		return -EIO;
+
 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
 	if (!pos)
 		return -EIO;
 
-	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+	pos = pci_pcie_cap(dev);
 	if (!pos)
 		return -EIO;
 
@@ -60,7 +63,10 @@
 	u16 reg16 = 0;
 	int pos;
 
-	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+	if (dev->aer_firmware_first)
+		return -EIO;
+
+	pos = pci_pcie_cap(dev);
 	if (!pos)
 		return -EIO;
 
@@ -78,48 +84,27 @@
 int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
 {
 	int pos;
-	u32 status, mask;
-
-	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
-	if (!pos)
-		return -EIO;
-
-	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
-	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
-	if (dev->error_state == pci_channel_io_normal)
-		status &= ~mask; /* Clear corresponding nonfatal bits */
-	else
-		status &= mask; /* Clear corresponding fatal bits */
-	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
-
-#if 0
-int pci_cleanup_aer_correct_error_status(struct pci_dev *dev)
-{
-	int pos;
 	u32 status;
 
 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
 	if (!pos)
 		return -EIO;
 
-	pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
-	pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, status);
+	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
+	if (status)
+		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
 
 	return 0;
 }
-#endif  /*  0  */
+EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
 
 static int set_device_error_reporting(struct pci_dev *dev, void *data)
 {
 	bool enable = *((bool *)data);
 
-	if (dev->pcie_type == PCIE_RC_PORT ||
-	    dev->pcie_type == PCIE_SW_UPSTREAM_PORT ||
-	    dev->pcie_type == PCIE_SW_DOWNSTREAM_PORT) {
+	if ((dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) ||
+	    (dev->pcie_type == PCI_EXP_TYPE_UPSTREAM) ||
+	    (dev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)) {
 		if (enable)
 			pci_enable_pcie_error_reporting(dev);
 		else
@@ -218,7 +203,7 @@
 	 */
 	if (atomic_read(&dev->enable_cnt) == 0)
 		return 0;
-	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+	pos = pci_pcie_cap(dev);
 	if (!pos)
 		return 0;
 	/* Check if AER is enabled */
@@ -431,10 +416,9 @@
 	result = (struct find_aer_service_data *) data;
 
 	if (device->bus == &pcie_port_bus_type) {
-		struct pcie_port_data *port_data;
+		struct pcie_device *pcie = to_pcie_device(device);
 
-		port_data = pci_get_drvdata(to_pcie_device(device)->port);
-		if (port_data->port_type == PCIE_SW_DOWNSTREAM_PORT)
+		if (pcie->port->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)
 			result->is_downstream = 1;
 
 		driver = device->driver;
@@ -612,7 +596,7 @@
 	u16 reg16;
 	u32 reg32;
 
-	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	pos = pci_pcie_cap(pdev);
 	/* Clear PCIE Capability's Device Status */
 	pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, &reg16);
 	pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16);
@@ -874,8 +858,22 @@
  */
 int aer_init(struct pcie_device *dev)
 {
-	if (aer_osc_setup(dev) && !forceload)
-		return -ENXIO;
+	if (dev->port->aer_firmware_first) {
+		dev_printk(KERN_DEBUG, &dev->device,
+			   "PCIe errors handled by platform firmware.\n");
+		goto out;
+	}
+
+	if (aer_osc_setup(dev))
+		goto out;
 
 	return 0;
+out:
+	if (forceload) {
+		dev_printk(KERN_DEBUG, &dev->device,
+			   "aerdrv forceload requested.\n");
+		dev->port->aer_firmware_first = 0;
+		return 0;
+	}
+	return -ENXIO;
 }
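
The simplified pci_cleanup_aer_uncorrect_error_status() relies on the register's RW1C (write-1-to-clear) semantics: writing back exactly the value just read clears every bit that was set and leaves the rest alone. Illustrative sketch:

	u32 status;

	/* RW1C: a status of 0x00004000 (Completion Timeout) is cleared
	 * by writing 0x00004000 back, not by writing 0. */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	if (status)
		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
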
diff --git a/drivers/pci/pcie/aer/ecrc.c b/drivers/pci/pcie/aer/ecrc.c
index a928d8a..a2747a6 100644
--- a/drivers/pci/pcie/aer/ecrc.c
+++ b/drivers/pci/pcie/aer/ecrc.c
@@ -51,7 +51,7 @@
 	int pos;
 	u32 reg32;
 
-	if (!dev->is_pcie)
+	if (!pci_is_pcie(dev))
 		return -ENODEV;
 
 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
@@ -79,7 +79,7 @@
 	int pos;
 	u32 reg32;
 
-	if (!dev->is_pcie)
+	if (!pci_is_pcie(dev))
 		return -ENODEV;
 
 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 5b7056c..5a01fc7 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -122,7 +122,7 @@
 	struct pci_bus *linkbus = link->pdev->subordinate;
 
 	list_for_each_entry(child, &linkbus->devices, bus_list) {
-		pos = pci_find_capability(child, PCI_CAP_ID_EXP);
+		pos = pci_pcie_cap(child);
 		if (!pos)
 			return;
 		pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16);
@@ -156,7 +156,7 @@
 
 	/* All functions should have the same cap and state, take the worst */
 	list_for_each_entry(child, &linkbus->devices, bus_list) {
-		pos = pci_find_capability(child, PCI_CAP_ID_EXP);
+		pos = pci_pcie_cap(child);
 		if (!pos)
 			return;
 		pci_read_config_dword(child, pos + PCI_EXP_LNKCAP, &reg32);
@@ -191,23 +191,23 @@
 	 * Configuration, so just check one function
 	 */
 	child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
-	BUG_ON(!child->is_pcie);
+	BUG_ON(!pci_is_pcie(child));
 
 	/* Check downstream component if bit Slot Clock Configuration is 1 */
-	cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
+	cpos = pci_pcie_cap(child);
 	pci_read_config_word(child, cpos + PCI_EXP_LNKSTA, &reg16);
 	if (!(reg16 & PCI_EXP_LNKSTA_SLC))
 		same_clock = 0;
 
 	/* Check upstream component if bit Slot Clock Configuration is 1 */
-	ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+	ppos = pci_pcie_cap(parent);
 	pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16);
 	if (!(reg16 & PCI_EXP_LNKSTA_SLC))
 		same_clock = 0;
 
 	/* Configure downstream component, all functions */
 	list_for_each_entry(child, &linkbus->devices, bus_list) {
-		cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
+		cpos = pci_pcie_cap(child);
 		pci_read_config_word(child, cpos + PCI_EXP_LNKCTL, &reg16);
 		child_reg[PCI_FUNC(child->devfn)] = reg16;
 		if (same_clock)
@@ -247,7 +247,7 @@
 	dev_printk(KERN_ERR, &parent->dev,
 		   "ASPM: Could not configure common clock\n");
 	list_for_each_entry(child, &linkbus->devices, bus_list) {
-		cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
+		cpos = pci_pcie_cap(child);
 		pci_write_config_word(child, cpos + PCI_EXP_LNKCTL,
 				      child_reg[PCI_FUNC(child->devfn)]);
 	}
@@ -300,7 +300,7 @@
 	u16 reg16;
 	u32 reg32;
 
-	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	pos = pci_pcie_cap(pdev);
 	pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32);
 	info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
 	info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
@@ -420,7 +420,7 @@
 		    child->pcie_type != PCI_EXP_TYPE_LEG_END)
 			continue;
 
-		pos = pci_find_capability(child, PCI_CAP_ID_EXP);
+		pos = pci_pcie_cap(child);
 		pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32);
 		/* Calculate endpoint L0s acceptable latency */
 		encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
@@ -436,7 +436,7 @@
 static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
 {
 	u16 reg16;
-	int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	int pos = pci_pcie_cap(pdev);
 
 	pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
 	reg16 &= ~0x3;
@@ -503,7 +503,7 @@
 	 * very strange. Disable ASPM for the whole slot
 	 */
 	list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
-		pos = pci_find_capability(child, PCI_CAP_ID_EXP);
+		pos = pci_pcie_cap(child);
 		if (!pos)
 			return -EINVAL;
 		/*
@@ -563,7 +563,7 @@
 	struct pcie_link_state *link;
 	int blacklist = !!pcie_aspm_sanity_check(pdev);
 
-	if (aspm_disabled || !pdev->is_pcie || pdev->link_state)
+	if (aspm_disabled || !pci_is_pcie(pdev) || pdev->link_state)
 		return;
 	if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
 	    pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
@@ -629,7 +629,8 @@
 	struct pci_dev *parent = pdev->bus->self;
 	struct pcie_link_state *link, *root, *parent_link;
 
-	if (aspm_disabled || !pdev->is_pcie || !parent || !parent->link_state)
+	if (aspm_disabled || !pci_is_pcie(pdev) ||
+	    !parent || !parent->link_state)
 		return;
 	if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
 	    (parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
@@ -670,7 +671,7 @@
 {
 	struct pcie_link_state *link = pdev->link_state;
 
-	if (aspm_disabled || !pdev->is_pcie || !link)
+	if (aspm_disabled || !pci_is_pcie(pdev) || !link)
 		return;
 	if ((pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
 	    (pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
@@ -696,7 +697,7 @@
 	struct pci_dev *parent = pdev->bus->self;
 	struct pcie_link_state *link;
 
-	if (aspm_disabled || !pdev->is_pcie)
+	if (aspm_disabled || !pci_is_pcie(pdev))
 		return;
 	if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
 	    pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)
@@ -841,8 +842,9 @@
 {
 	struct pcie_link_state *link_state = pdev->link_state;
 
-	if (!pdev->is_pcie || (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
-		pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
+	if (!pci_is_pcie(pdev) ||
+	    (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
+	     pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
 		return;
 
 	if (link_state->aspm_support)
@@ -857,8 +859,9 @@
 {
 	struct pcie_link_state *link_state = pdev->link_state;
 
-	if (!pdev->is_pcie || (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
-		pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
+	if (!pci_is_pcie(pdev) ||
+	    (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
+	     pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
 		return;
 
 	if (link_state->aspm_support)
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 17ad538..aaeb9d2 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -11,31 +11,16 @@
 
 #include <linux/compiler.h>
 
-#if !defined(PCI_CAP_ID_PME)
-#define PCI_CAP_ID_PME			1
-#endif
-
-#if !defined(PCI_CAP_ID_EXP)
-#define PCI_CAP_ID_EXP			0x10
-#endif
-
-#define PORT_TYPE_MASK			0xf
-#define PORT_TO_SLOT_MASK		0x100
-#define SLOT_HP_CAPABLE_MASK		0x40
-#define PCIE_CAPABILITIES_REG		0x2
-#define PCIE_SLOT_CAPABILITIES_REG	0x14
-#define PCIE_PORT_DEVICE_MAXSERVICES	4
-#define PCIE_PORT_MSI_VECTOR_MASK	0x1f
+#define PCIE_PORT_DEVICE_MAXSERVICES   4
 /*
- * According to the PCI Express Base Specification 2.0, the indices of the MSI-X
- * table entires used by port services must not exceed 31
+ * According to the PCI Express Base Specification 2.0, the indices of
+ * the MSI-X table entries used by port services must not exceed 31
  */
 #define PCIE_PORT_MAX_MSIX_ENTRIES	32
 
 #define get_descriptor_id(type, service) (((type - 4) << 4) | service)
 
 extern struct bus_type pcie_port_bus_type;
-extern int pcie_port_device_probe(struct pci_dev *dev);
 extern int pcie_port_device_register(struct pci_dev *dev);
 #ifdef CONFIG_PM
 extern int pcie_port_device_suspend(struct device *dev);
diff --git a/drivers/pci/pcie/portdrv_bus.c b/drivers/pci/pcie/portdrv_bus.c
index ef3a4ee..18bf90f 100644
--- a/drivers/pci/pcie/portdrv_bus.c
+++ b/drivers/pci/pcie/portdrv_bus.c
@@ -26,7 +26,6 @@
 static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
 {
 	struct pcie_device *pciedev;
-	struct pcie_port_data *port_data;
 	struct pcie_port_service_driver *driver;
 
 	if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type)
@@ -38,10 +37,8 @@
 	if (driver->service != pciedev->service)
 		return 0;
 
-	port_data = pci_get_drvdata(pciedev->port);
-
-	if (driver->port_type != PCIE_ANY_PORT
-	     && driver->port_type != port_data->port_type)
+	if ((driver->port_type != PCIE_ANY_PORT) &&
+	    (driver->port_type != pciedev->port->pcie_type))
 		return 0;
 
 	return 1;
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 52f84fc..413262e 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -108,9 +108,9 @@
 		 * the value in this field indicates which MSI-X Table entry is
 		 * used to generate the interrupt message."
 		 */
-		pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
-		pci_read_config_word(dev, pos + PCIE_CAPABILITIES_REG, &reg16);
-		entry = (reg16 >> 9) & PCIE_PORT_MSI_VECTOR_MASK;
+		pos = pci_pcie_cap(dev);
+		pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
+		entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
 		if (entry >= nr_entries)
 			goto Error;
 
@@ -177,37 +177,41 @@
 }
 
 /**
- * assign_interrupt_mode - choose interrupt mode for PCI Express port services
- *                         (INTx, MSI-X, MSI) and set up vectors
+ * init_service_irqs - initialize irqs for PCI Express port services
  * @dev: PCI Express port to handle
- * @vectors: Array of interrupt vectors to populate
+ * @irqs: Array of irqs to populate
  * @mask: Bitmask of port capabilities returned by get_port_device_capability()
  *
- * Return value: Interrupt mode associated with the port
+ * Return value: 0 on success, -ENODEV otherwise
  */
-static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask)
+static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
 {
-	int irq, interrupt_mode = PCIE_PORT_NO_IRQ;
-	int i;
+	int i, irq;
 
 	/* Try to use MSI-X if supported */
-	if (!pcie_port_enable_msix(dev, vectors, mask))
-		return PCIE_PORT_MSIX_MODE;
-
+	if (!pcie_port_enable_msix(dev, irqs, mask))
+		return 0;
 	/* We're not going to use MSI-X, so try MSI and fall back to INTx */
-	if (!pci_enable_msi(dev))
-		interrupt_mode = PCIE_PORT_MSI_MODE;
+	irq = -1;
+	if (!pci_enable_msi(dev) || dev->pin)
+		irq = dev->irq;
 
-	if (interrupt_mode == PCIE_PORT_NO_IRQ && dev->pin)
-		interrupt_mode = PCIE_PORT_INTx_MODE;
-
-	irq = interrupt_mode != PCIE_PORT_NO_IRQ ? dev->irq : -1;
 	for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
-		vectors[i] = irq;
+		irqs[i] = irq;
+	irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1;
 
-	vectors[PCIE_PORT_SERVICE_VC_SHIFT] = -1;
+	if (irq < 0)
+		return -ENODEV;
+	return 0;
+}
 
-	return interrupt_mode;
+static void cleanup_service_irqs(struct pci_dev *dev)
+{
+	if (dev->msix_enabled)
+		pci_disable_msix(dev);
+	else if (dev->msi_enabled)
+		pci_disable_msi(dev);
 }
 
 /**
@@ -226,13 +229,12 @@
 	u16 reg16;
 	u32 reg32;
 
-	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
-	pci_read_config_word(dev, pos + PCIE_CAPABILITIES_REG, &reg16);
+	pos = pci_pcie_cap(dev);
+	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
 	/* Hot-Plug Capable */
-	if (reg16 & PORT_TO_SLOT_MASK) {
-		pci_read_config_dword(dev, 
-			pos + PCIE_SLOT_CAPABILITIES_REG, &reg32);
-		if (reg32 & SLOT_HP_CAPABLE_MASK)
+	if (reg16 & PCI_EXP_FLAGS_SLOT) {
+		pci_read_config_dword(dev, pos + PCI_EXP_SLTCAP, &reg32);
+		if (reg32 & PCI_EXP_SLTCAP_HPC)
 			services |= PCIE_PORT_SERVICE_HP;
 	}
 	/* AER capable */
@@ -241,80 +243,47 @@
 	/* VC support */
 	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC))
 		services |= PCIE_PORT_SERVICE_VC;
+	/* Root ports are capable of generating PME too */
+	if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
+		services |= PCIE_PORT_SERVICE_PME;
 
 	return services;
 }
 
 /**
- * pcie_device_init - initialize PCI Express port service device
- * @dev: Port service device to initialize
- * @parent: PCI Express port to associate the service device with
- * @port_type: Type of the port
- * @service_type: Type of service to associate with the service device
+ * pcie_device_init - allocate and initialize PCI Express port service device
+ * @pdev: PCI Express port to associate the service device with
+ * @service: Type of service to associate with the service device
  * @irq: Interrupt vector to associate with the service device
  */
-static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev, 
-	int service_type, int irq)
+static int pcie_device_init(struct pci_dev *pdev, int service, int irq)
 {
-	struct pcie_port_data *port_data = pci_get_drvdata(parent);
+	int retval;
+	struct pcie_device *pcie;
 	struct device *device;
-	int port_type = port_data->port_type;
 
-	dev->port = parent;
-	dev->irq = irq;
-	dev->service = service_type;
+	pcie = kzalloc(sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return -ENOMEM;
+	pcie->port = pdev;
+	pcie->irq = irq;
+	pcie->service = service;
 
 	/* Initialize generic device interface */
-	device = &dev->device;
-	memset(device, 0, sizeof(struct device));
+	device = &pcie->device;
 	device->bus = &pcie_port_bus_type;
-	device->driver = NULL;
-	dev_set_drvdata(device, NULL);
 	device->release = release_pcie_device;	/* callback to free pcie dev */
 	dev_set_name(device, "%s:pcie%02x",
-		 pci_name(parent), get_descriptor_id(port_type, service_type));
-	device->parent = &parent->dev;
-}
+		     pci_name(pdev),
+		     get_descriptor_id(pdev->pcie_type, service));
+	device->parent = &pdev->dev;
 
-/**
- * alloc_pcie_device - allocate PCI Express port service device structure
- * @parent: PCI Express port to associate the service device with
- * @port_type: Type of the port
- * @service_type: Type of service to associate with the service device
- * @irq: Interrupt vector to associate with the service device
- */
-static struct pcie_device* alloc_pcie_device(struct pci_dev *parent,
-	int service_type, int irq)
-{
-	struct pcie_device *device;
-
-	device = kzalloc(sizeof(struct pcie_device), GFP_KERNEL);
-	if (!device)
-		return NULL;
-
-	pcie_device_init(parent, device, service_type, irq);
-	return device;
-}
-
-/**
- * pcie_port_device_probe - check if device is a PCI Express port
- * @dev: Device to check
- */
-int pcie_port_device_probe(struct pci_dev *dev)
-{
-	int pos, type;
-	u16 reg;
-
-	if (!(pos = pci_find_capability(dev, PCI_CAP_ID_EXP)))
-		return -ENODEV;
-
-	pci_read_config_word(dev, pos + PCIE_CAPABILITIES_REG, &reg);
-	type = (reg >> 4) & PORT_TYPE_MASK;
-	if (	type == PCIE_RC_PORT || type == PCIE_SW_UPSTREAM_PORT ||
-		type == PCIE_SW_DOWNSTREAM_PORT )
-		return 0;
-
-	return -ENODEV;
+	retval = device_register(device);
+	if (retval)
+		kfree(pcie);
+	else
+		get_device(device);
+	return retval;
 }
 
 /**
@@ -326,77 +295,49 @@
  */
 int pcie_port_device_register(struct pci_dev *dev)
 {
-	struct pcie_port_data *port_data;
-	int status, capabilities, irq_mode, i, nr_serv;
-	int vectors[PCIE_PORT_DEVICE_MAXSERVICES];
-	u16 reg16;
+	int status, capabilities, i, nr_service;
+	int irqs[PCIE_PORT_DEVICE_MAXSERVICES];
 
-	port_data = kzalloc(sizeof(*port_data), GFP_KERNEL);
-	if (!port_data)
-		return -ENOMEM;
-	pci_set_drvdata(dev, port_data);
-
-	/* Get port type */
-	pci_read_config_word(dev,
-		pci_find_capability(dev, PCI_CAP_ID_EXP) +
-		PCIE_CAPABILITIES_REG, &reg16);
-	port_data->port_type = (reg16 >> 4) & PORT_TYPE_MASK;
-
+	/* Get and check PCI Express port services */
 	capabilities = get_port_device_capability(dev);
-	/* Root ports are capable of generating PME too */
-	if (port_data->port_type == PCIE_RC_PORT)
-		capabilities |= PCIE_PORT_SERVICE_PME;
+	if (!capabilities)
+		return -ENODEV;
 
-	irq_mode = assign_interrupt_mode(dev, vectors, capabilities);
-	if (irq_mode == PCIE_PORT_NO_IRQ) {
-		/*
-		 * Don't use service devices that require interrupts if there is
-		 * no way to generate them.
-		 */
-		if (!(capabilities & PCIE_PORT_SERVICE_VC)) {
-			status = -ENODEV;
-			goto Error;
-		}
-		capabilities = PCIE_PORT_SERVICE_VC;
-	}
-	port_data->port_irq_mode = irq_mode;
-
+	/* Enable PCI Express port device */
 	status = pci_enable_device(dev);
 	if (status)
-		goto Error;
+		return status;
 	pci_set_master(dev);
+	/*
+	 * Initialize service irqs. Don't use service devices that
+	 * require interrupts if there is no way to generate them.
+	 */
+	status = init_service_irqs(dev, irqs, capabilities);
+	if (status) {
+		capabilities &= PCIE_PORT_SERVICE_VC;
+		if (!capabilities)
+			goto error_disable;
+	}
 
 	/* Allocate child services if any */
-	for (i = 0, nr_serv = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
-		struct pcie_device *child;
+	status = -ENODEV;
+	nr_service = 0;
+	for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
 		int service = 1 << i;
-
 		if (!(capabilities & service))
 			continue;
-
-		child = alloc_pcie_device(dev, service, vectors[i]);
-		if (!child)
-			continue;
-
-		status = device_register(&child->device);
-		if (status) {
-			kfree(child);
-			continue;
-		}
-
-		get_device(&child->device);
-		nr_serv++;
+		if (!pcie_device_init(dev, service, irqs[i]))
+			nr_service++;
 	}
-	if (!nr_serv) {
-		pci_disable_device(dev);
-		status = -ENODEV;
-		goto Error;
-	}
+	if (!nr_service)
+		goto error_cleanup_irqs;
 
 	return 0;
 
- Error:
-	kfree(port_data);
+error_cleanup_irqs:
+	cleanup_service_irqs(dev);
+error_disable:
+	pci_disable_device(dev);
 	return status;
 }
 
@@ -464,21 +405,9 @@
  */
 void pcie_port_device_remove(struct pci_dev *dev)
 {
-	struct pcie_port_data *port_data = pci_get_drvdata(dev);
-
 	device_for_each_child(&dev->dev, NULL, remove_iter);
+	cleanup_service_irqs(dev);
 	pci_disable_device(dev);
-
-	switch (port_data->port_irq_mode) {
-	case PCIE_PORT_MSIX_MODE:
-		pci_disable_msix(dev);
-		break;
-	case PCIE_PORT_MSI_MODE:
-		pci_disable_msi(dev);
-		break;
-	}
-
-	kfree(port_data);
 }
 
 /**
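
For reference, the port service device names produced by pcie_device_init() pack the port type and service bit through get_descriptor_id(). A worked example, with the service bit value assumed from include/linux/pcieport_if.h:

	/* Root port: PCI_EXP_TYPE_ROOT_PORT == 4, so (type - 4) << 4 == 0;
	 * assuming PCIE_PORT_SERVICE_AER == 0x04, the descriptor is 0x04
	 * and the device is named e.g. "0000:00:1c.0:pcie04". */
	int id = get_descriptor_id(PCI_EXP_TYPE_ROOT_PORT, PCIE_PORT_SERVICE_AER);
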
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index f635e47..ce52ea3 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -67,14 +67,16 @@
  * this port device.
  *
  */
-static int __devinit pcie_portdrv_probe (struct pci_dev *dev, 
-				const struct pci_device_id *id )
+static int __devinit pcie_portdrv_probe(struct pci_dev *dev,
+					const struct pci_device_id *id)
 {
-	int			status;
+	int status;
 
-	status = pcie_port_device_probe(dev);
-	if (status)
-		return status;
+	if (!pci_is_pcie(dev) ||
+	    ((dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
+	     (dev->pcie_type != PCI_EXP_TYPE_UPSTREAM) &&
+	     (dev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)))
+		return -ENODEV;
 
         if (!dev->irq && dev->pin) {
 		dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; "
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 8105e32..98ffb2d 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/cpumask.h>
 #include <linux/pci-aspm.h>
+#include <acpi/acpi_hest.h>
 #include "pci.h"
 
 #define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
@@ -163,12 +164,12 @@
 {
 	u32 l, sz, mask;
 
-	mask = type ? ~PCI_ROM_ADDRESS_ENABLE : ~0;
+	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
 
 	res->name = pci_name(dev);
 
 	pci_read_config_dword(dev, pos, &l);
-	pci_write_config_dword(dev, pos, mask);
+	pci_write_config_dword(dev, pos, l | mask);
 	pci_read_config_dword(dev, pos, &sz);
 	pci_write_config_dword(dev, pos, l);
 
@@ -223,9 +224,13 @@
 			goto fail;
 
 		if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
-			dev_err(&dev->dev, "can't handle 64-bit BAR\n");
+			dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n",
+				pos);
 			goto fail;
-		} else if ((sizeof(resource_size_t) < 8) && l) {
+		}
+
+		res->flags |= IORESOURCE_MEM_64;
+		if ((sizeof(resource_size_t) < 8) && l) {
 			/* Address above 32-bit boundary; disable the BAR */
 			pci_write_config_dword(dev, pos, 0);
 			pci_write_config_dword(dev, pos + 4, 0);
@@ -234,14 +239,9 @@
 		} else {
 			res->start = l64;
 			res->end = l64 + sz64;
-			dev_printk(KERN_DEBUG, &dev->dev,
-				"reg %x %s: %pR\n", pos,
-				 (res->flags & IORESOURCE_PREFETCH) ?
-					"64bit mmio pref" : "64bit mmio",
-				 res);
+			dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n",
+				   pos, res);
 		}
-
-		res->flags |= IORESOURCE_MEM_64;
 	} else {
 		sz = pci_size(l, sz, mask);
 
@@ -251,11 +251,7 @@
 		res->start = l;
 		res->end = l + sz;
 
-		dev_printk(KERN_DEBUG, &dev->dev, "reg %x %s: %pR\n", pos,
-			(res->flags & IORESOURCE_IO) ? "io port" :
-			 ((res->flags & IORESOURCE_PREFETCH) ?
-				 "32bit mmio pref" : "32bit mmio"),
-			res);
+		dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
 	}
 
  out:
@@ -297,8 +293,11 @@
 	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
 		return;
 
+	dev_info(&dev->dev, "PCI bridge to [bus %02x-%02x]%s\n",
+		 child->secondary, child->subordinate,
+		 dev->transparent ? " (subtractive decode)" : "");
+
 	if (dev->transparent) {
-		dev_info(&dev->dev, "transparent bridge\n");
 		for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++)
 			child->resource[i] = child->parent->resource[i - 3];
 	}
@@ -323,7 +322,7 @@
 			res->start = base;
 		if (!res->end)
 			res->end = limit + 0xfff;
-		dev_printk(KERN_DEBUG, &dev->dev, "bridge io port: %pR\n", res);
+		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
 	}
 
 	res = child->resource[1];
@@ -335,8 +334,7 @@
 		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
 		res->start = base;
 		res->end = limit + 0xfffff;
-		dev_printk(KERN_DEBUG, &dev->dev, "bridge 32bit mmio: %pR\n",
-			res);
+		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
 	}
 
 	res = child->resource[2];
@@ -375,9 +373,7 @@
 			res->flags |= IORESOURCE_MEM_64;
 		res->start = base;
 		res->end = limit + 0xfffff;
-		dev_printk(KERN_DEBUG, &dev->dev, "bridge %sbit mmio pref: %pR\n",
-			(res->flags & PCI_PREF_RANGE_TYPE_64) ? "64" : "32",
-			res);
+		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
 	}
 }
 
@@ -651,13 +647,14 @@
 		    (child->number > bus->subordinate) ||
 		    (child->number < bus->number) ||
 		    (child->subordinate < bus->number)) {
-			pr_debug("PCI: Bus #%02x (-#%02x) is %s "
-				"hidden behind%s bridge #%02x (-#%02x)\n",
+			dev_info(&child->dev, "[bus %02x-%02x] %s "
+				"hidden behind%s bridge %s [bus %02x-%02x]\n",
 				child->number, child->subordinate,
 				(bus->number > child->subordinate &&
 				 bus->subordinate < child->number) ?
 					"wholly" : "partially",
 				bus->self->transparent ? " transparent" : "",
+				dev_name(&bus->dev),
 				bus->number, bus->subordinate);
 		}
 		bus = bus->parent;
@@ -693,6 +690,7 @@
 	if (!pos)
 		return;
 	pdev->is_pcie = 1;
+	pdev->pcie_cap = pos;
 	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
 	pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
 }
@@ -703,7 +701,7 @@
 	u16 reg16;
 	u32 reg32;
 
-	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	pos = pci_pcie_cap(pdev);
 	if (!pos)
 		return;
 	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
@@ -714,6 +712,12 @@
 		pdev->is_hotplug_bridge = 1;
 }
 
+static void set_pci_aer_firmware_first(struct pci_dev *pdev)
+{
+	if (acpi_hest_firmware_first_pci(pdev))
+		pdev->aer_firmware_first = 1;
+}
+
 #define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)
 
 /**
@@ -731,6 +735,7 @@
 	u32 class;
 	u8 hdr_type;
 	struct pci_slot *slot;
+	int pos = 0;
 
 	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
 		return -EIO;
@@ -742,6 +747,7 @@
 	dev->multifunction = !!(hdr_type & 0x80);
 	dev->error_state = pci_channel_io_normal;
 	set_pcie_port_type(dev);
+	set_pci_aer_firmware_first(dev);
 
 	list_for_each_entry(slot, &dev->bus->slots, list)
 		if (PCI_SLOT(dev->devfn) == slot->number)
@@ -822,6 +828,11 @@
 		dev->transparent = ((dev->class & 0xff) == 1);
 		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
 		set_pcie_hotplug_bridge(dev);
+		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
+		if (pos) {
+			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
+			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
+		}
 		break;
 
 	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
@@ -907,7 +918,7 @@
 	if (class == PCI_CLASS_BRIDGE_HOST)
 		return pci_cfg_space_size_ext(dev);
 
-	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+	pos = pci_pcie_cap(dev);
 	if (!pos) {
 		pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
 		if (!pos)
@@ -1014,6 +1025,9 @@
 
 	/* Single Root I/O Virtualization */
 	pci_iov_init(dev);
+
+	/* Enable ACS P2P upstream forwarding */
+	pci_enable_acs(dev);
 }
 
 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
@@ -1110,7 +1124,7 @@
 	unsigned int devfn, pass, max = bus->secondary;
 	struct pci_dev *dev;
 
-	pr_debug("PCI: Scanning bus %04x:%02x\n", pci_domain_nr(bus), bus->number);
+	dev_dbg(&bus->dev, "scanning bus\n");
 
 	/* Go find them, Rover! */
 	for (devfn = 0; devfn < 0x100; devfn += 8)
@@ -1124,8 +1138,7 @@
 	 * all PCI-to-PCI bridges on this bus.
 	 */
 	if (!bus->is_added) {
-		pr_debug("PCI: Fixups for bus %04x:%02x\n",
-			 pci_domain_nr(bus), bus->number);
+		dev_dbg(&bus->dev, "fixups for bus\n");
 		pcibios_fixup_bus(bus);
 		if (pci_is_root_bus(bus))
 			bus->is_added = 1;
@@ -1145,8 +1158,7 @@
 	 *
 	 * Return how far we've got finding sub-buses.
 	 */
-	pr_debug("PCI: Bus scan for %04x:%02x returning with max=%02x\n",
-		pci_domain_nr(bus), bus->number, max);
+	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
 	return max;
 }
 
@@ -1154,7 +1166,7 @@
 		int bus, struct pci_ops *ops, void *sysdata)
 {
 	int error;
-	struct pci_bus *b;
+	struct pci_bus *b, *b2;
 	struct device *dev;
 
 	b = pci_alloc_bus();
@@ -1170,9 +1182,10 @@
 	b->sysdata = sysdata;
 	b->ops = ops;
 
-	if (pci_find_bus(pci_domain_nr(b), bus)) {
+	b2 = pci_find_bus(pci_domain_nr(b), bus);
+	if (b2) {
 		/* If we already got to this bus through a different bridge, ignore it */
-		pr_debug("PCI: Bus %04x:%02x already known\n", pci_domain_nr(b), bus);
+		dev_dbg(&b2->dev, "bus already known\n");
 		goto err_out;
 	}
 
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 245d2cd..7cfa7c3 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -357,7 +357,7 @@
 		pcibios_bus_to_resource(dev, res, &bus_region);
 
 		pci_claim_resource(dev, nr);
-		dev_info(&dev->dev, "quirk: region %04x-%04x claimed by %s\n", region, region + size - 1, name);
+		dev_info(&dev->dev, "quirk: %pR claimed by %s\n", res, name);
 	}
 }	
 
@@ -1680,6 +1680,7 @@
  */
 #define AMD_813X_MISC			0x40
 #define AMD_813X_NOIOAMODE		(1<<0)
+#define AMD_813X_REV_B1			0x12
 #define AMD_813X_REV_B2			0x13
 
 static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
@@ -1688,7 +1689,8 @@
 
 	if (noioapicquirk)
 		return;
-	if (dev->revision == AMD_813X_REV_B2)
+	if ((dev->revision == AMD_813X_REV_B1) ||
+	    (dev->revision == AMD_813X_REV_B2))
 		return;
 
 	pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword);
@@ -1698,8 +1700,10 @@
 	dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
 		 dev->vendor, dev->device);
 }
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,   PCI_DEVICE_ID_AMD_8131_BRIDGE, 	quirk_disable_amd_813x_boot_interrupt);
-DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD,   PCI_DEVICE_ID_AMD_8132_BRIDGE, 	quirk_disable_amd_813x_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_8131_BRIDGE,	quirk_disable_amd_813x_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_8131_BRIDGE,	quirk_disable_amd_813x_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_8132_BRIDGE,	quirk_disable_amd_813x_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_8132_BRIDGE,	quirk_disable_amd_813x_boot_interrupt);
 
 #define AMD_8111_PCI_IRQ_ROUTING	0x56
 
@@ -2595,9 +2599,37 @@
 static int __init pci_apply_final_quirks(void)
 {
 	struct pci_dev *dev = NULL;
+	u8 cls = 0;
+	u8 tmp;
+
+	if (pci_cache_line_size)
+		printk(KERN_DEBUG "PCI: CLS %u bytes\n",
+		       pci_cache_line_size << 2);
 
 	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
 		pci_fixup_device(pci_fixup_final, dev);
+		/*
+		 * If arch hasn't set it explicitly yet, use the CLS
+		 * value shared by all PCI devices.  If there's a
+		 * mismatch, fall back to the default value.
+		 */
+		if (!pci_cache_line_size) {
+			pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp);
+			if (!cls)
+				cls = tmp;
+			if (!tmp || cls == tmp)
+				continue;
+
+			printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), "
+			       "using %u bytes\n", cls << 2, tmp << 2,
+			       pci_dfl_cache_line_size << 2);
+			pci_cache_line_size = pci_dfl_cache_line_size;
+		}
+	}
+	if (!pci_cache_line_size) {
+		printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n",
+		       cls << 2, pci_dfl_cache_line_size << 2);
+		pci_cache_line_size = cls;
 	}
 
 	return 0;
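
pci_cache_line_size and pci_dfl_cache_line_size are measured in 32-bit words (dwords), matching the PCI_CACHE_LINE_SIZE register, which is why the messages above shift by 2 to print bytes. Worked example:

	/* A 64-byte CPU cache line gives L1_CACHE_BYTES >> 2 == 16 dwords;
	 * the quirk then logs "PCI: CLS 64 bytes" since 16 << 2 == 64. */
	u8 cls_dwords = 64 >> 2;			/* 16: the register value */
	unsigned int cls_bytes = cls_dwords << 2;	/* back to 64 */
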
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index ec41535..6dae871 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -26,14 +26,14 @@
 {
 	struct pci_dev *tmp = NULL;
 
-	if (pdev->is_pcie)
+	if (pci_is_pcie(pdev))
 		return NULL;
 	while (1) {
 		if (pci_is_root_bus(pdev->bus))
 			break;
 		pdev = pdev->bus->self;
 		/* a p2p bridge */
-		if (!pdev->is_pcie) {
+		if (!pci_is_pcie(pdev)) {
 			tmp = pdev;
 			continue;
 		}
@@ -149,32 +149,33 @@
 }
 
 /**
- * pci_get_bus_and_slot - locate PCI device from a given PCI bus & slot
- * @bus: number of PCI bus on which desired PCI device resides
- * @devfn: encodes number of PCI slot in which the desired PCI
- * device resides and the logical device number within that slot
- * in case of multi-function devices.
+ * pci_get_domain_bus_and_slot - locate PCI device for a given PCI domain (segment), bus, and slot
+ * @domain: PCI domain/segment on which the PCI device resides.
+ * @bus: PCI bus on which desired PCI device resides
+ * @devfn: encodes number of PCI slot in which the desired PCI device
+ * resides and the logical device number within that slot in case of
+ * multi-function devices.
  *
- * Note: the bus/slot search is limited to PCI domain (segment) 0.
- *
- * Given a PCI bus and slot/function number, the desired PCI device
- * is located in system global list of PCI devices.  If the device
- * is found, a pointer to its data structure is returned.  If no
- * device is found, %NULL is returned. The returned device has its
- * reference count bumped by one.
+ * Given a PCI domain, bus, and slot/function number, the desired PCI
+ * device is located in the list of PCI devices. If the device is
+ * found, its reference count is increased and this function returns a
+ * pointer to its data structure.  The caller must decrement the
+ * reference count by calling pci_dev_put().  If no device is found,
+ * %NULL is returned.
  */
-
-struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn)
+struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
+					    unsigned int devfn)
 {
 	struct pci_dev *dev = NULL;
 
 	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
-		if (pci_domain_nr(dev->bus) == 0 &&
-		   (dev->bus->number == bus && dev->devfn == devfn))
+		if (pci_domain_nr(dev->bus) == domain &&
+		    (dev->bus->number == bus && dev->devfn == devfn))
 			return dev;
 	}
 	return NULL;
 }
+EXPORT_SYMBOL(pci_get_domain_bus_and_slot);
 
 static int match_pci_dev_by_id(struct device *dev, void *data)
 {
@@ -354,5 +355,4 @@
 EXPORT_SYMBOL(pci_get_device);
 EXPORT_SYMBOL(pci_get_subsys);
 EXPORT_SYMBOL(pci_get_slot);
-EXPORT_SYMBOL(pci_get_bus_and_slot);
 EXPORT_SYMBOL(pci_get_class);
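
A usage sketch for the new lookup (the device address is purely illustrative); note the pci_dev_put() required by the kernel-doc above:

	/* Look up 0000:00:1f.2: domain 0, bus 0, slot 0x1f, function 2. */
	struct pci_dev *dev;

	dev = pci_get_domain_bus_and_slot(0, 0x00, PCI_DEVFN(0x1f, 2));
	if (dev) {
		/* ... use dev ... */
		pci_dev_put(dev);	/* drop the reference we were given */
	}
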
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index cb1a027..c48cd37 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -71,53 +71,50 @@
 void pci_setup_cardbus(struct pci_bus *bus)
 {
 	struct pci_dev *bridge = bus->self;
+	struct resource *res;
 	struct pci_bus_region region;
 
-	dev_info(&bridge->dev, "CardBus bridge, secondary bus %04x:%02x\n",
-		 pci_domain_nr(bus), bus->number);
+	dev_info(&bridge->dev, "CardBus bridge to [bus %02x-%02x]\n",
+		 bus->secondary, bus->subordinate);
 
-	pcibios_resource_to_bus(bridge, &region, bus->resource[0]);
-	if (bus->resource[0]->flags & IORESOURCE_IO) {
+	res = bus->resource[0];
+	pcibios_resource_to_bus(bridge, &region, res);
+	if (res->flags & IORESOURCE_IO) {
 		/*
 		 * The IO resource is allocated a range twice as large as it
 		 * would normally need.  This allows us to set both IO regs.
 		 */
-		dev_info(&bridge->dev, "  IO window: %#08lx-%#08lx\n",
-		       (unsigned long)region.start,
-		       (unsigned long)region.end);
+		dev_info(&bridge->dev, "  bridge window %pR\n", res);
 		pci_write_config_dword(bridge, PCI_CB_IO_BASE_0,
 					region.start);
 		pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0,
 					region.end);
 	}
 
-	pcibios_resource_to_bus(bridge, &region, bus->resource[1]);
-	if (bus->resource[1]->flags & IORESOURCE_IO) {
-		dev_info(&bridge->dev, "  IO window: %#08lx-%#08lx\n",
-		       (unsigned long)region.start,
-		       (unsigned long)region.end);
+	res = bus->resource[1];
+	pcibios_resource_to_bus(bridge, &region, res);
+	if (res->flags & IORESOURCE_IO) {
+		dev_info(&bridge->dev, "  bridge window %pR\n", res);
 		pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
 					region.start);
 		pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1,
 					region.end);
 	}
 
-	pcibios_resource_to_bus(bridge, &region, bus->resource[2]);
-	if (bus->resource[2]->flags & IORESOURCE_MEM) {
-		dev_info(&bridge->dev, "  PREFETCH window: %#08lx-%#08lx\n",
-		       (unsigned long)region.start,
-		       (unsigned long)region.end);
+	res = bus->resource[2];
+	pcibios_resource_to_bus(bridge, &region, res);
+	if (res->flags & IORESOURCE_MEM) {
+		dev_info(&bridge->dev, "  bridge window %pR\n", res);
 		pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
 					region.start);
 		pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0,
 					region.end);
 	}
 
-	pcibios_resource_to_bus(bridge, &region, bus->resource[3]);
-	if (bus->resource[3]->flags & IORESOURCE_MEM) {
-		dev_info(&bridge->dev, "  MEM window: %#08lx-%#08lx\n",
-		       (unsigned long)region.start,
-		       (unsigned long)region.end);
+	res = bus->resource[3];
+	pcibios_resource_to_bus(bridge, &region, res);
+	if (res->flags & IORESOURCE_MEM) {
+		dev_info(&bridge->dev, "  bridge window %pR\n", res);
 		pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
 					region.start);
 		pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1,
@@ -140,34 +137,33 @@
 static void pci_setup_bridge(struct pci_bus *bus)
 {
 	struct pci_dev *bridge = bus->self;
+	struct resource *res;
 	struct pci_bus_region region;
 	u32 l, bu, lu, io_upper16;
-	int pref_mem64;
 
 	if (pci_is_enabled(bridge))
 		return;
 
-	dev_info(&bridge->dev, "PCI bridge, secondary bus %04x:%02x\n",
-		 pci_domain_nr(bus), bus->number);
+	dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n",
+		 bus->secondary, bus->subordinate);
 
 	/* Set up the top and bottom of the PCI I/O segment for this bus. */
-	pcibios_resource_to_bus(bridge, &region, bus->resource[0]);
-	if (bus->resource[0]->flags & IORESOURCE_IO) {
+	res = bus->resource[0];
+	pcibios_resource_to_bus(bridge, &region, res);
+	if (res->flags & IORESOURCE_IO) {
 		pci_read_config_dword(bridge, PCI_IO_BASE, &l);
 		l &= 0xffff0000;
 		l |= (region.start >> 8) & 0x00f0;
 		l |= region.end & 0xf000;
 		/* Set up upper 16 bits of I/O base/limit. */
 		io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
-		dev_info(&bridge->dev, "  IO window: %#04lx-%#04lx\n",
-		    (unsigned long)region.start,
-		    (unsigned long)region.end);
+		dev_info(&bridge->dev, "  bridge window %pR\n", res);
 	}
 	else {
 		/* Clear upper 16 bits of I/O base/limit. */
 		io_upper16 = 0;
 		l = 0x00f0;
-		dev_info(&bridge->dev, "  IO window: disabled\n");
+		dev_info(&bridge->dev, "  bridge window [io  disabled]\n");
 	}
 	/* Temporarily disable the I/O range before updating PCI_IO_BASE. */
 	pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
@@ -178,17 +174,16 @@
 
 	/* Set up the top and bottom of the PCI Memory segment
 	   for this bus. */
-	pcibios_resource_to_bus(bridge, &region, bus->resource[1]);
-	if (bus->resource[1]->flags & IORESOURCE_MEM) {
+	res = bus->resource[1];
+	pcibios_resource_to_bus(bridge, &region, res);
+	if (res->flags & IORESOURCE_MEM) {
 		l = (region.start >> 16) & 0xfff0;
 		l |= region.end & 0xfff00000;
-		dev_info(&bridge->dev, "  MEM window: %#08lx-%#08lx\n",
-		    (unsigned long)region.start,
-		    (unsigned long)region.end);
+		dev_info(&bridge->dev, "  bridge window %pR\n", res);
 	}
 	else {
 		l = 0x0000fff0;
-		dev_info(&bridge->dev, "  MEM window: disabled\n");
+		dev_info(&bridge->dev, "  bridge window [mem disabled]\n");
 	}
 	pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
 
@@ -198,34 +193,27 @@
 	pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);
 
 	/* Set up PREF base/limit. */
-	pref_mem64 = 0;
 	bu = lu = 0;
-	pcibios_resource_to_bus(bridge, &region, bus->resource[2]);
-	if (bus->resource[2]->flags & IORESOURCE_PREFETCH) {
-		int width = 8;
+	res = bus->resource[2];
+	pcibios_resource_to_bus(bridge, &region, res);
+	if (res->flags & IORESOURCE_PREFETCH) {
 		l = (region.start >> 16) & 0xfff0;
 		l |= region.end & 0xfff00000;
-		if (bus->resource[2]->flags & IORESOURCE_MEM_64) {
-			pref_mem64 = 1;
+		if (res->flags & IORESOURCE_MEM_64) {
 			bu = upper_32_bits(region.start);
 			lu = upper_32_bits(region.end);
-			width = 16;
 		}
-		dev_info(&bridge->dev, "  PREFETCH window: %#0*llx-%#0*llx\n",
-				width, (unsigned long long)region.start,
-				width, (unsigned long long)region.end);
+		dev_info(&bridge->dev, "  bridge window %pR\n", res);
 	}
 	else {
 		l = 0x0000fff0;
-		dev_info(&bridge->dev, "  PREFETCH window: disabled\n");
+		dev_info(&bridge->dev, "  bridge window [mem pref disabled]\n");
 	}
 	pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
 
-	if (pref_mem64) {
-		/* Set the upper 32 bits of PREF base & limit. */
-		pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
-		pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
-	}
+	/* Set the upper 32 bits of PREF base & limit. */
+	pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
+	pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
 
 	pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
 }
@@ -345,6 +333,10 @@
 #endif
 	size = ALIGN(size + size1, 4096);
 	if (!size) {
+		if (b_res->start || b_res->end)
+			dev_info(&bus->self->dev, "disabling bridge window "
+				 "%pR to [bus %02x-%02x] (unused)\n", b_res,
+				 bus->secondary, bus->subordinate);
 		b_res->flags = 0;
 		return;
 	}
@@ -390,8 +382,9 @@
 			align = pci_resource_alignment(dev, r);
 			order = __ffs(align) - 20;
 			if (order > 11) {
-				dev_warn(&dev->dev, "BAR %d bad alignment %llx: "
-					 "%pR\n", i, (unsigned long long)align, r);
+				dev_warn(&dev->dev, "disabling BAR %d: %pR "
+					 "(bad alignment %#llx)\n", i, r,
+					 (unsigned long long) align);
 				r->flags = 0;
 				continue;
 			}
@@ -425,6 +418,10 @@
 	}
 	size = ALIGN(size, min_align);
 	if (!size) {
+		if (b_res->start || b_res->end)
+			dev_info(&bus->self->dev, "disabling bridge window "
+				 "%pR to [bus %02x-%02x] (unused)\n", b_res,
+				 bus->secondary, bus->subordinate);
 		b_res->flags = 0;
 		return 1;
 	}
@@ -582,10 +579,7 @@
                 if (!res || !res->end)
                         continue;
 
-		dev_printk(KERN_DEBUG, &bus->dev, "resource %d %s %pR\n", i,
-			   (res->flags & IORESOURCE_IO) ? "io: " :
-			    ((res->flags & IORESOURCE_PREFETCH)? "pref mem":"mem:"),
-			   res);
+		dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res);
         }
 }
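
For reference, %pR is the vsprintf extension for struct resource pointers:
it prints the resource type, range, and decoded flags, which is what makes
the hand-rolled "%#08lx-%#08lx" formats above redundant.  A sketch with
invented values:

	struct resource r = {
		.start = 0xf0000000,
		.end   = 0xf7ffffff,
		.flags = IORESOURCE_MEM,
	};

	dev_info(&bridge->dev, "  bridge window %pR\n", &r);
	/* prints: "  bridge window [mem 0xf0000000-0xf7ffffff]" */

The lowercase %pr variant used by the PNP hunks further down is the same
except that it prints the flags word raw instead of decoding it.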
 
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index c54526b..7d678bb 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -51,12 +51,6 @@
 
 	pcibios_resource_to_bus(dev, &region, res);
 
-	dev_dbg(&dev->dev, "BAR %d: got res %pR bus [%#llx-%#llx] "
-		"flags %#lx\n", resno, res,
-		 (unsigned long long)region.start,
-		 (unsigned long long)region.end,
-		 (unsigned long)res->flags);
-
 	new = region.start | (res->flags & PCI_REGION_FLAG_MASK);
 	if (res->flags & IORESOURCE_IO)
 		mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
@@ -91,9 +85,9 @@
 		}
 	}
 	res->flags &= ~IORESOURCE_UNSET;
-	dev_dbg(&dev->dev, "BAR %d: moved to bus [%#llx-%#llx] flags %#lx\n",
-		resno, (unsigned long long)region.start,
-		(unsigned long long)region.end, res->flags);
+	dev_info(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx])\n",
+		 resno, res, (unsigned long long)region.start,
+		 (unsigned long long)region.end);
 }
 
 int pci_claim_resource(struct pci_dev *dev, int resource)
@@ -103,20 +97,17 @@
 	int err;
 
 	root = pci_find_parent_resource(dev, res);
-
-	err = -EINVAL;
-	if (root != NULL)
-		err = request_resource(root, res);
-
-	if (err) {
-		const char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge";
-		dev_err(&dev->dev, "BAR %d: %s of %s %pR\n",
-			resource,
-			root ? "address space collision on" :
-				"no parent found for",
-			dtype, res);
+	if (!root) {
+		dev_err(&dev->dev, "no compatible bridge window for %pR\n",
+			res);
+		return -EINVAL;
 	}
 
+	err = request_resource(root, res);
+	if (err)
+		dev_err(&dev->dev,
+			"address space collision: %pR already in use\n", res);
+
 	return err;
 }
 EXPORT_SYMBOL(pci_claim_resource);
@@ -124,7 +115,7 @@
 #ifdef CONFIG_PCI_QUIRKS
 void pci_disable_bridge_window(struct pci_dev *dev)
 {
-	dev_dbg(&dev->dev, "Disabling bridge window.\n");
+	dev_info(&dev->dev, "disabling bridge mem windows\n");
 
 	/* MMIO Base/Limit */
 	pci_write_config_dword(dev, PCI_MEMORY_BASE, 0x0000fff0);
@@ -165,6 +156,7 @@
 
 	if (!ret) {
 		res->flags &= ~IORESOURCE_STARTALIGN;
+		dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
 		if (resno < PCI_BRIDGE_RESOURCES)
 			pci_update_resource(dev, resno);
 	}
@@ -178,12 +170,12 @@
 	resource_size_t align;
 	struct pci_bus *bus;
 	int ret;
+	char *type;
 
 	align = pci_resource_alignment(dev, res);
 	if (!align) {
-		dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus "
-			"alignment) %pR flags %#lx\n",
-			resno, res, res->flags);
+		dev_info(&dev->dev, "BAR %d: can't assign %pR "
+			 "(bogus alignment)\n", resno, res);
 		return -EINVAL;
 	}
 
@@ -198,9 +190,20 @@
 		break;
 	}
 
-	if (ret)
-		dev_info(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
-			resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
+	if (ret) {
+		if (res->flags & IORESOURCE_MEM) {
+			if (res->flags & IORESOURCE_PREFETCH)
+				type = "mem pref";
+			else
+				type = "mem";
+		} else if (res->flags & IORESOURCE_IO)
+			type = "io";
+		else
+			type = "unknown";
+		dev_info(&dev->dev,
+			 "BAR %d: can't assign %s (size %#llx)\n",
+			 resno, type, (unsigned long long) resource_size(res));
+	}
 
 	return ret;
 }
@@ -225,9 +228,8 @@
 
 		r_align = pci_resource_alignment(dev, r);
 		if (!r_align) {
-			dev_warn(&dev->dev, "BAR %d: bogus alignment "
-				"%pR flags %#lx\n",
-				i, r, r->flags);
+			dev_warn(&dev->dev, "BAR %d: %pR has bogus alignment\n",
+				 i, r);
 			continue;
 		}
 		for (list = head; ; list = list->next) {
@@ -274,8 +276,8 @@
 			continue;
 
 		if (!r->parent) {
-			dev_err(&dev->dev, "device not available because of "
-				"BAR %d %pR collisions\n", i, r);
+			dev_err(&dev->dev, "device not available "
+				"(can't reserve %pR)\n", r);
 			return -EINVAL;
 		}
 
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index 4cd70d0..a73b040 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -184,26 +184,33 @@
     
 =====================================================================*/
 
-/*
- * Since there is only one interrupt available to CardBus
- * devices, all devices downstream of this device must
- * be using this IRQ.
- */
-static void cardbus_assign_irqs(struct pci_bus *bus, int irq)
+static void cardbus_config_irq_and_cls(struct pci_bus *bus, int irq)
 {
 	struct pci_dev *dev;
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		u8 irq_pin;
 
+		/*
+		 * Since there is only one interrupt available to
+		 * CardBus devices, all devices downstream of this
+		 * device must be using this IRQ.
+		 */
 		pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq_pin);
 		if (irq_pin) {
 			dev->irq = irq;
 			pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
 		}
 
+		/*
+		 * Some controllers transfer very slowly with 0 CLS.
+		 * Configure it.  This may fail as CLS configuration
+		 * is mandatory only for MWI.
+		 */
+		pci_set_cacheline_size(dev);
+
 		if (dev->subordinate)
-			cardbus_assign_irqs(dev->subordinate, irq);
+			cardbus_config_irq_and_cls(dev->subordinate, irq);
 	}
 }
 
@@ -228,7 +235,7 @@
 	 */
 	pci_bus_size_bridges(bus);
 	pci_bus_assign_resources(bus);
-	cardbus_assign_irqs(bus, s->pci_irq);
+	cardbus_config_irq_and_cls(bus, s->pci_irq);
 
 	/* socket specific tune function */
 	if (s->tune_bridge)
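
For context, pci_set_cacheline_size() programs PCI_CACHE_LINE_SIZE from the
arch-wide pci_cache_line_size value and reads it back to verify the device
accepted it.  A sketch of the tolerant call pattern used above (the dev_dbg
message is invented):

	/* A valid CLS is strictly required only for Memory-Write-Invalidate,
	 * so failure here is acceptable.
	 */
	if (pci_set_cacheline_size(dev) != 0)
		dev_dbg(&dev->dev, "could not set cache line size\n");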
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index 8473fe5..dfbd5a6 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -285,15 +285,10 @@
 				 * the PCI region, and that might prevent a PCI
 				 * driver from requesting its resources.
 				 */
-				dev_warn(&dev->dev, "%s resource "
-					"(0x%llx-0x%llx) overlaps %s BAR %d "
-					"(0x%llx-0x%llx), disabling\n",
-					pnp_resource_type_name(res),
-					(unsigned long long) pnp_start,
-					(unsigned long long) pnp_end,
-					pci_name(pdev), i,
-					(unsigned long long) pci_start,
-					(unsigned long long) pci_end);
+				dev_warn(&dev->dev,
+					 "disabling %pR because it overlaps "
+					 "%s BAR %d %pR\n", res,
+					 pci_name(pdev), i, &pdev->resource[i]);
 				res->flags |= IORESOURCE_DISABLED;
 			}
 		}
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index ba97654..64d0596 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -517,7 +517,7 @@
 	res->start = irq;
 	res->end = irq;
 
-	pnp_dbg(&dev->dev, "  add irq %d flags %#x\n", irq, flags);
+	pnp_dbg(&dev->dev, "  add %pr\n", res);
 	return pnp_res;
 }
 
@@ -538,7 +538,7 @@
 	res->start = dma;
 	res->end = dma;
 
-	pnp_dbg(&dev->dev, "  add dma %d flags %#x\n", dma, flags);
+	pnp_dbg(&dev->dev, "  add %pr\n", res);
 	return pnp_res;
 }
 
@@ -562,8 +562,7 @@
 	res->start = start;
 	res->end = end;
 
-	pnp_dbg(&dev->dev, "  add io  %#llx-%#llx flags %#x\n",
-		(unsigned long long) start, (unsigned long long) end, flags);
+	pnp_dbg(&dev->dev, "  add %pr\n", res);
 	return pnp_res;
 }
 
@@ -587,8 +586,7 @@
 	res->start = start;
 	res->end = end;
 
-	pnp_dbg(&dev->dev, "  add mem %#llx-%#llx flags %#x\n",
-		(unsigned long long) start, (unsigned long long) end, flags);
+	pnp_dbg(&dev->dev, "  add %pr\n", res);
 	return pnp_res;
 }
 
diff --git a/drivers/pnp/support.c b/drivers/pnp/support.c
index 63087d5..9585c1c 100644
--- a/drivers/pnp/support.c
+++ b/drivers/pnp/support.c
@@ -75,47 +75,14 @@
 
 void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc)
 {
-	char buf[128];
-	int len;
 	struct pnp_resource *pnp_res;
-	struct resource *res;
 
-	if (list_empty(&dev->resources)) {
+	if (list_empty(&dev->resources))
 		pnp_dbg(&dev->dev, "%s: no current resources\n", desc);
-		return;
-	}
-
-	pnp_dbg(&dev->dev, "%s: current resources:\n", desc);
-	list_for_each_entry(pnp_res, &dev->resources, list) {
-		res = &pnp_res->res;
-		len = 0;
-
-		len += scnprintf(buf + len, sizeof(buf) - len, "  %-3s ",
-				 pnp_resource_type_name(res));
-
-		if (res->flags & IORESOURCE_DISABLED) {
-			pnp_dbg(&dev->dev, "%sdisabled\n", buf);
-			continue;
-		}
-
-		switch (pnp_resource_type(res)) {
-		case IORESOURCE_IO:
-		case IORESOURCE_MEM:
-			len += scnprintf(buf + len, sizeof(buf) - len,
-					 "%#llx-%#llx flags %#lx",
-					 (unsigned long long) res->start,
-					 (unsigned long long) res->end,
-					 res->flags);
-			break;
-		case IORESOURCE_IRQ:
-		case IORESOURCE_DMA:
-			len += scnprintf(buf + len, sizeof(buf) - len,
-					 "%lld flags %#lx",
-					 (unsigned long long) res->start,
-					 res->flags);
-			break;
-		}
-		pnp_dbg(&dev->dev, "%s\n", buf);
+	else {
+		pnp_dbg(&dev->dev, "%s: current resources:\n", desc);
+		list_for_each_entry(pnp_res, &dev->resources, list)
+			pnp_dbg(&dev->dev, "%pr\n", &pnp_res->res);
 	}
 }
 
diff --git a/drivers/pnp/system.c b/drivers/pnp/system.c
index 59b9092..49c1720 100644
--- a/drivers/pnp/system.c
+++ b/drivers/pnp/system.c
@@ -22,11 +22,11 @@
 	{"", 0}
 };
 
-static void reserve_range(struct pnp_dev *dev, resource_size_t start,
-			  resource_size_t end, int port)
+static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
 {
 	char *regionid;
 	const char *pnpid = dev_name(&dev->dev);
+	resource_size_t start = r->start, end = r->end;
 	struct resource *res;
 
 	regionid = kmalloc(16, GFP_KERNEL);
@@ -48,10 +48,8 @@
 	 * example do reserve stuff they know about too, so we may well
 	 * have double reservations.
 	 */
-	dev_info(&dev->dev, "%s range 0x%llx-0x%llx %s reserved\n",
-		port ? "ioport" : "iomem",
-		(unsigned long long) start, (unsigned long long) end,
-		res ? "has been" : "could not be");
+	dev_info(&dev->dev, "%pR %s reserved\n", r,
+		 res ? "has been" : "could not be");
 }
 
 static void reserve_resources_of_dev(struct pnp_dev *dev)
@@ -77,14 +75,14 @@
 		if (res->end < res->start)
 			continue;	/* invalid */
 
-		reserve_range(dev, res->start, res->end, 1);
+		reserve_range(dev, res, 1);
 	}
 
 	for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_MEM, i)); i++) {
 		if (res->flags & IORESOURCE_DISABLED)
 			continue;
 
-		reserve_range(dev, res->start, res->end, 0);
+		reserve_range(dev, res, 0);
 	}
 }
 
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index dfcd75c..80e71fc 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -103,6 +103,8 @@
 
 source "drivers/gpu/drm/radeon/Kconfig"
 
+source "drivers/gpu/drm/nouveau/Kconfig"
+
 source "drivers/staging/octeon/Kconfig"
 
 source "drivers/staging/serqt_usb2/Kconfig"
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 2407508..81aac7f 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -60,6 +60,8 @@
 	default y if ARCH_IXP4XX
 	default y if ARCH_W90X900
 	default y if ARCH_AT91SAM9G45
+	default y if ARCH_MXC
+	default y if ARCH_OMAP34XX
 	default PCI
 
 # ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface.
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index be3c9b8..473aa1a 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -44,3 +44,5 @@
 
 obj-$(CONFIG_USB_ATM)		+= atm/
 obj-$(CONFIG_USB_SPEEDTOUCH)	+= atm/
+
+obj-$(CONFIG_USB_ULPI)		+= otg/
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index e4eca78..34d4eb9 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1461,6 +1461,12 @@
 }
 
 #endif /* CONFIG_PM */
+
+#define NOKIA_PCSUITE_ACM_INFO(x) \
+		USB_DEVICE_AND_INTERFACE_INFO(0x0421, x, \
+		USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
+		USB_CDC_ACM_PROTO_VENDOR)
+
 /*
  * USB driver structure.
  */
@@ -1519,6 +1525,57 @@
 	.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
 	},
 
+	/* Nokia S60 phones expose two ACM channels. The first is
+	 * a modem and is picked up by the standard AT-command
+	 * information below. The second is 'vendor-specific' but
+	 * is treated as a serial device at the S60 end, so we want
+	 * to expose it on Linux too. */
+	{ NOKIA_PCSUITE_ACM_INFO(0x042D), }, /* Nokia 3250 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x04D8), }, /* Nokia 5500 Sport */
+	{ NOKIA_PCSUITE_ACM_INFO(0x04C9), }, /* Nokia E50 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0419), }, /* Nokia E60 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x044D), }, /* Nokia E61 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0001), }, /* Nokia E61i */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0475), }, /* Nokia E62 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0508), }, /* Nokia E65 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0418), }, /* Nokia E70 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0425), }, /* Nokia N71 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0486), }, /* Nokia N73 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x04DF), }, /* Nokia N75 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x000e), }, /* Nokia N77 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0445), }, /* Nokia N80 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x042F), }, /* Nokia N91 & N91 8GB */
+	{ NOKIA_PCSUITE_ACM_INFO(0x048E), }, /* Nokia N92 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0420), }, /* Nokia N93 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x04E6), }, /* Nokia N93i  */
+	{ NOKIA_PCSUITE_ACM_INFO(0x04B2), }, /* Nokia 5700 XpressMusic */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0134), }, /* Nokia 6110 Navigator (China) */
+	{ NOKIA_PCSUITE_ACM_INFO(0x046E), }, /* Nokia 6110 Navigator */
+	{ NOKIA_PCSUITE_ACM_INFO(0x002f), }, /* Nokia 6120 classic &  */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0088), }, /* Nokia 6121 classic */
+	{ NOKIA_PCSUITE_ACM_INFO(0x00fc), }, /* Nokia 6124 classic */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0042), }, /* Nokia E51 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x00b0), }, /* Nokia E66 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x00ab), }, /* Nokia E71 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0481), }, /* Nokia N76 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0007), }, /* Nokia N81 & N81 8GB */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0071), }, /* Nokia N82 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x04F0), }, /* Nokia N95 & N95-3 NAM */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0070), }, /* Nokia N95 8GB  */
+	{ NOKIA_PCSUITE_ACM_INFO(0x00e9), }, /* Nokia 5320 XpressMusic */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0099), }, /* Nokia 6210 Navigator, RM-367 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0128), }, /* Nokia 6210 Navigator, RM-419 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x008f), }, /* Nokia 6220 Classic */
+	{ NOKIA_PCSUITE_ACM_INFO(0x00a0), }, /* Nokia 6650 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x007b), }, /* Nokia N78 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0094), }, /* Nokia N85 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x003a), }, /* Nokia N96 & N96-3  */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */
+	{ NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */
+
+	/* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
+
 	/* control interfaces with various AT-command sets */
 	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
 		USB_CDC_ACM_PROTO_AT_V25TER) },
@@ -1533,7 +1590,6 @@
 	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
 		USB_CDC_ACM_PROTO_AT_CDMA) },
 
-	/* NOTE:  COMM/ACM/0xff is likely MSFT RNDIS ... NOT a modem!! */
 	{ }
 };
 
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index b4bd241..7c5f4e3 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -347,13 +347,8 @@
 	goto exit;
 
 usbtmc_abort_bulk_out_clear_halt:
-	rv = usb_control_msg(data->usb_dev,
-			     usb_sndctrlpipe(data->usb_dev, 0),
-			     USB_REQ_CLEAR_FEATURE,
-			     USB_DIR_OUT | USB_TYPE_STANDARD |
-			     USB_RECIP_ENDPOINT,
-			     USB_ENDPOINT_HALT, data->bulk_out, buffer,
-			     0, USBTMC_TIMEOUT);
+	rv = usb_clear_halt(data->usb_dev,
+			    usb_sndbulkpipe(data->usb_dev, data->bulk_out));
 
 	if (rv < 0) {
 		dev_err(dev, "usb_control_msg returned %d\n", rv);
@@ -562,10 +557,16 @@
 		n_bytes = roundup(12 + this_part, 4);
 		memset(buffer + 12 + this_part, 0, n_bytes - (12 + this_part));
 
-		retval = usb_bulk_msg(data->usb_dev,
-				      usb_sndbulkpipe(data->usb_dev,
-						      data->bulk_out),
-				      buffer, n_bytes, &actual, USBTMC_TIMEOUT);
+		do {
+			retval = usb_bulk_msg(data->usb_dev,
+					      usb_sndbulkpipe(data->usb_dev,
+							      data->bulk_out),
+					      buffer, n_bytes,
+					      &actual, USBTMC_TIMEOUT);
+			if (retval != 0)
+				break;
+			n_bytes -= actual;
+		} while (n_bytes);
 
 		data->bTag_last_write = data->bTag;
 		data->bTag++;
@@ -702,14 +703,8 @@
 
 usbtmc_clear_bulk_out_halt:
 
-	rv = usb_control_msg(data->usb_dev,
-			     usb_sndctrlpipe(data->usb_dev, 0),
-			     USB_REQ_CLEAR_FEATURE,
-			     USB_DIR_OUT | USB_TYPE_STANDARD |
-			     USB_RECIP_ENDPOINT,
-			     USB_ENDPOINT_HALT,
-			     data->bulk_out, buffer, 0,
-			     USBTMC_TIMEOUT);
+	rv = usb_clear_halt(data->usb_dev,
+			    usb_sndbulkpipe(data->usb_dev, data->bulk_out));
 	if (rv < 0) {
 		dev_err(dev, "usb_control_msg returned %d\n", rv);
 		goto exit;
@@ -730,13 +725,8 @@
 	if (!buffer)
 		return -ENOMEM;
 
-	rv = usb_control_msg(data->usb_dev,
-			     usb_sndctrlpipe(data->usb_dev, 0),
-			     USB_REQ_CLEAR_FEATURE,
-			     USB_DIR_OUT | USB_TYPE_STANDARD |
-			     USB_RECIP_ENDPOINT,
-			     USB_ENDPOINT_HALT, data->bulk_out,
-			     buffer, 0, USBTMC_TIMEOUT);
+	rv = usb_clear_halt(data->usb_dev,
+			    usb_sndbulkpipe(data->usb_dev, data->bulk_out));
 
 	if (rv < 0) {
 		dev_err(&data->usb_dev->dev, "usb_control_msg returned %d\n",
@@ -759,12 +749,8 @@
 	if (!buffer)
 		return -ENOMEM;
 
-	rv = usb_control_msg(data->usb_dev, usb_sndctrlpipe(data->usb_dev, 0),
-			     USB_REQ_CLEAR_FEATURE,
-			     USB_DIR_OUT | USB_TYPE_STANDARD |
-			     USB_RECIP_ENDPOINT,
-			     USB_ENDPOINT_HALT, data->bulk_in, buffer, 0,
-			     USBTMC_TIMEOUT);
+	rv = usb_clear_halt(data->usb_dev,
+			    usb_rcvbulkpipe(data->usb_dev, data->bulk_in));
 
 	if (rv < 0) {
 		dev_err(&data->usb_dev->dev, "usb_control_msg returned %d\n",
@@ -1109,13 +1095,13 @@
 	kref_put(&data->kref, usbtmc_delete);
 }
 
-static int usbtmc_suspend (struct usb_interface *intf, pm_message_t message)
+static int usbtmc_suspend(struct usb_interface *intf, pm_message_t message)
 {
 	/* this driver does not have pending URBs */
 	return 0;
 }
 
-static int usbtmc_resume (struct usb_interface *intf)
+static int usbtmc_resume(struct usb_interface *intf)
 {
 	return 0;
 }
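
The usb_clear_halt() conversions above are not just shorthand: in addition
to sending the CLEAR_FEATURE(ENDPOINT_HALT) control request, usb_clear_halt()
also resets the host-side data toggle for the endpoint, which the open-coded
usb_control_msg() calls did not.  The resulting pattern, as a sketch:

	rv = usb_clear_halt(data->usb_dev,
			    usb_sndbulkpipe(data->usb_dev, data->bulk_out));
	if (rv < 0)
		dev_err(dev, "usb_clear_halt returned %d\n", rv);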
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 4f86447..60a45f1 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -83,6 +83,47 @@
 }
 static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id);
 
+/**
+ * store_remove_id - remove a USB device ID from this driver
+ * @driver: target device driver
+ * @buf: buffer for scanning device ID data
+ * @count: input size
+ *
+ * Removes a dynamic usb device ID from this driver.
+ */
+static ssize_t
+store_remove_id(struct device_driver *driver, const char *buf, size_t count)
+{
+	struct usb_dynid *dynid, *n;
+	struct usb_driver *usb_driver = to_usb_driver(driver);
+	u32 idVendor = 0;
+	u32 idProduct = 0;
+	int fields = 0;
+	int retval = -ENODEV;
+
+	fields = sscanf(buf, "%x %x", &idVendor, &idProduct);
+	if (fields < 2)
+		return -EINVAL;
+
+	spin_lock(&usb_driver->dynids.lock);
+	list_for_each_entry_safe(dynid, n, &usb_driver->dynids.list, node) {
+		struct usb_device_id *id = &dynid->id;
+		if ((id->idVendor == idVendor) &&
+		    (id->idProduct == idProduct)) {
+			list_del(&dynid->node);
+			kfree(dynid);
+			retval = 0;
+			break;
+		}
+	}
+	spin_unlock(&usb_driver->dynids.lock);
+
+	if (retval)
+		return retval;
+	return count;
+}
+static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id);
+
 static int usb_create_newid_file(struct usb_driver *usb_drv)
 {
 	int error = 0;
@@ -107,6 +148,21 @@
 				   &driver_attr_new_id);
 }
 
+static int
+usb_create_removeid_file(struct usb_driver *drv)
+{
+	int error = 0;
+	if (drv->probe != NULL)
+		error = driver_create_file(&drv->drvwrap.driver,
+				&driver_attr_remove_id);
+	return error;
+}
+
+static void usb_remove_removeid_file(struct usb_driver *drv)
+{
+	driver_remove_file(&drv->drvwrap.driver, &driver_attr_remove_id);
+}
+
 static void usb_free_dynids(struct usb_driver *usb_drv)
 {
 	struct usb_dynid *dynid, *n;
@@ -128,6 +184,16 @@
 {
 }
 
+static int
+usb_create_removeid_file(struct usb_driver *drv)
+{
+	return 0;
+}
+
+static void usb_remove_removeid_file(struct usb_driver *drv)
+{
+}
+
 static inline void usb_free_dynids(struct usb_driver *usb_drv)
 {
 }
@@ -774,19 +840,34 @@
 	INIT_LIST_HEAD(&new_driver->dynids.list);
 
 	retval = driver_register(&new_driver->drvwrap.driver);
+	if (retval)
+		goto out;
 
-	if (!retval) {
-		pr_info("%s: registered new interface driver %s\n",
+	usbfs_update_special();
+
+	retval = usb_create_newid_file(new_driver);
+	if (retval)
+		goto out_newid;
+
+	retval = usb_create_removeid_file(new_driver);
+	if (retval)
+		goto out_removeid;
+
+	pr_info("%s: registered new interface driver %s\n",
 			usbcore_name, new_driver->name);
-		usbfs_update_special();
-		usb_create_newid_file(new_driver);
-	} else {
-		printk(KERN_ERR "%s: error %d registering interface "
+
+out:
+	return retval;
+
+out_removeid:
+	usb_remove_newid_file(new_driver);
+out_newid:
+	driver_unregister(&new_driver->drvwrap.driver);
+
+	printk(KERN_ERR "%s: error %d registering interface "
 			"	driver %s\n",
 			usbcore_name, retval, new_driver->name);
-	}
-
-	return retval;
+	goto out;
 }
 EXPORT_SYMBOL_GPL(usb_register_driver);
 
@@ -806,6 +887,7 @@
 	pr_info("%s: deregistering interface driver %s\n",
 			usbcore_name, driver->name);
 
+	usb_remove_removeid_file(driver);
 	usb_remove_newid_file(driver);
 	usb_free_dynids(driver);
 	driver_unregister(&driver->drvwrap.driver);
@@ -948,8 +1030,6 @@
 
  done:
 	dev_vdbg(&udev->dev, "%s: status %d\n", __func__, status);
-	if (status == 0)
-		udev->autoresume_disabled = 0;
 	return status;
 }
 
@@ -1280,11 +1360,6 @@
 
 	/* Propagate the resume up the tree, if necessary */
 	if (udev->state == USB_STATE_SUSPENDED) {
-		if ((msg.event & PM_EVENT_AUTO) &&
-				udev->autoresume_disabled) {
-			status = -EPERM;
-			goto done;
-		}
 		if (parent) {
 			status = usb_autoresume_device(parent);
 			if (status == 0) {
@@ -1341,7 +1416,6 @@
 	int	status = 0;
 
 	usb_pm_lock(udev);
-	udev->auto_pm = 1;
 	udev->pm_usage_cnt += inc_usage_cnt;
 	WARN_ON(udev->pm_usage_cnt < 0);
 	if (inc_usage_cnt)
@@ -1473,7 +1547,6 @@
 	if (intf->condition == USB_INTERFACE_UNBOUND)
 		status = -ENODEV;
 	else {
-		udev->auto_pm = 1;
 		atomic_add(inc_usage_cnt, &intf->pm_usage_cnt);
 		udev->last_busy = jiffies;
 		if (inc_usage_cnt >= 0 &&
@@ -1640,8 +1713,6 @@
 
 	if (intf->condition == USB_INTERFACE_UNBOUND)
 		status = -ENODEV;
-	else if (udev->autoresume_disabled)
-		status = -EPERM;
 	else {
 		atomic_inc(&intf->pm_usage_cnt);
 		if (atomic_read(&intf->pm_usage_cnt) > 0 &&
@@ -1654,28 +1725,6 @@
 }
 EXPORT_SYMBOL_GPL(usb_autopm_get_interface_async);
 
-/**
- * usb_autopm_set_interface - set a USB interface's autosuspend state
- * @intf: the usb_interface whose state should be set
- *
- * This routine sets the autosuspend state of @intf's device according
- * to @intf's usage counter, which the caller must have set previously.
- * If the counter is <= 0, the device is autosuspended (if it isn't
- * already suspended and if nothing else prevents the autosuspend).  If
- * the counter is > 0, the device is autoresumed (if it isn't already
- * awake).
- */
-int usb_autopm_set_interface(struct usb_interface *intf)
-{
-	int	status;
-
-	status = usb_autopm_do_interface(intf, 0);
-	dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
-			__func__, status, atomic_read(&intf->pm_usage_cnt));
-	return status;
-}
-EXPORT_SYMBOL_GPL(usb_autopm_set_interface);
-
 #else
 
 void usb_autosuspend_work(struct work_struct *work)
@@ -1707,7 +1756,6 @@
 
 	do_unbind_rebind(udev, DO_UNBIND);
 	usb_pm_lock(udev);
-	udev->auto_pm = 0;
 	status = usb_suspend_both(udev, msg);
 	usb_pm_unlock(udev);
 	return status;
@@ -1730,7 +1778,6 @@
 	int	status;
 
 	usb_pm_lock(udev);
-	udev->auto_pm = 0;
 	status = usb_resume_both(udev, msg);
 	udev->last_busy = jiffies;
 	usb_pm_unlock(udev);
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 222ee07..bfc6c2e 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -99,6 +99,7 @@
 		printk(KERN_ERR "class_create failed for usb devices\n");
 		kfree(usb_class);
 		usb_class = NULL;
+		goto exit;
 	}
 	usb_class->class->devnode = usb_devnode;
 
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 05e6d31..bdf87a8 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -139,7 +139,7 @@
 
 	if (best) {
 		i = best->desc.bConfigurationValue;
-		dev_info(&udev->dev,
+		dev_dbg(&udev->dev,
 			"configuration #%d chosen from %d choice%s\n",
 			i, num_configs, plural(num_configs));
 	} else {
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 34de475..6dac3b8 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -38,6 +38,7 @@
 #include <asm/unaligned.h>
 #include <linux/platform_device.h>
 #include <linux/workqueue.h>
+#include <linux/mutex.h>
 
 #include <linux/usb.h>
 
@@ -1275,13 +1276,16 @@
 
 	if (usb_endpoint_xfer_control(&urb->ep->desc)
 	    && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) {
-		if (hcd->self.uses_dma)
+		if (hcd->self.uses_dma) {
 			urb->setup_dma = dma_map_single(
 					hcd->self.controller,
 					urb->setup_packet,
 					sizeof(struct usb_ctrlrequest),
 					DMA_TO_DEVICE);
-		else if (hcd->driver->flags & HCD_LOCAL_MEM)
+			if (dma_mapping_error(hcd->self.controller,
+						urb->setup_dma))
+				return -EAGAIN;
+		} else if (hcd->driver->flags & HCD_LOCAL_MEM)
 			ret = hcd_alloc_coherent(
 					urb->dev->bus, mem_flags,
 					&urb->setup_dma,
@@ -1293,13 +1297,16 @@
 	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 	if (ret == 0 && urb->transfer_buffer_length != 0
 	    && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
-		if (hcd->self.uses_dma)
+		if (hcd->self.uses_dma) {
 			urb->transfer_dma = dma_map_single (
 					hcd->self.controller,
 					urb->transfer_buffer,
 					urb->transfer_buffer_length,
 					dir);
-		else if (hcd->driver->flags & HCD_LOCAL_MEM) {
+			if (dma_mapping_error(hcd->self.controller,
+						urb->transfer_dma))
+				return -EAGAIN;
+		} else if (hcd->driver->flags & HCD_LOCAL_MEM) {
 			ret = hcd_alloc_coherent(
 					urb->dev->bus, mem_flags,
 					&urb->transfer_dma,
@@ -1589,19 +1596,32 @@
 	}
 }
 
-/* Check whether a new configuration or alt setting for an interface
- * will exceed the bandwidth for the bus (or the host controller resources).
- * Only pass in a non-NULL config or interface, not both!
- * Passing NULL for both new_config and new_intf means the device will be
- * de-configured by issuing a set configuration 0 command.
+/**
+ * usb_hcd_alloc_bandwidth - check whether a new bandwidth setting exceeds the bus bandwidth
+ * @udev: the USB device whose configuration or alt setting is changing
+ * @new_config: new configuration to install
+ * @cur_alt: the current alternate interface setting
+ * @new_alt: alternate interface setting that is being installed
+ *
+ * To change configurations, pass in the new configuration in new_config,
+ * and pass NULL for cur_alt and new_alt.
+ *
+ * To reset a device's configuration (put the device in the ADDRESSED state),
+ * pass in NULL for new_config, cur_alt, and new_alt.
+ *
+ * To change alternate interface settings, pass in NULL for new_config,
+ * pass in the current alternate interface setting in cur_alt,
+ * and pass in the new alternate interface setting in new_alt.
+ *
+ * Returns an error if the requested bandwidth change exceeds the
+ * bus bandwidth or host controller internal resources.
  */
-int usb_hcd_check_bandwidth(struct usb_device *udev,
+int usb_hcd_alloc_bandwidth(struct usb_device *udev,
 		struct usb_host_config *new_config,
-		struct usb_interface *new_intf)
+		struct usb_host_interface *cur_alt,
+		struct usb_host_interface *new_alt)
 {
 	int num_intfs, i, j;
-	struct usb_interface_cache *intf_cache;
-	struct usb_host_interface *alt = 0;
+	struct usb_host_interface *alt = NULL;
 	int ret = 0;
 	struct usb_hcd *hcd;
 	struct usb_host_endpoint *ep;
@@ -1611,7 +1631,7 @@
 		return 0;
 
 	/* Configuration is being removed - set configuration 0 */
-	if (!new_config && !new_intf) {
+	if (!new_config && !cur_alt) {
 		for (i = 1; i < 16; ++i) {
 			ep = udev->ep_out[i];
 			if (ep)
@@ -1648,19 +1668,12 @@
 			}
 		}
 		for (i = 0; i < num_intfs; ++i) {
+			/* Set up endpoints for alternate interface setting 0 */
+			alt = usb_find_alt_setting(new_config, i, 0);
+			if (!alt)
+				/* No alt setting 0? Pick the first setting. */
+				alt = &new_config->intf_cache[i]->altsetting[0];
 
-			/* Dig the endpoints for alt setting 0 out of the
-			 * interface cache for this interface
-			 */
-			intf_cache = new_config->intf_cache[i];
-			for (j = 0; j < intf_cache->num_altsetting; j++) {
-				if (intf_cache->altsetting[j].desc.bAlternateSetting == 0)
-					alt = &intf_cache->altsetting[j];
-			}
-			if (!alt) {
-				printk(KERN_DEBUG "Did not find alt setting 0 for intf %d\n", i);
-				continue;
-			}
 			for (j = 0; j < alt->desc.bNumEndpoints; j++) {
 				ret = hcd->driver->add_endpoint(hcd, udev, &alt->endpoint[j]);
 				if (ret < 0)
@@ -1668,6 +1681,22 @@
 			}
 		}
 	}
+	if (cur_alt && new_alt) {
+		/* Drop all the endpoints in the current alt setting */
+		for (i = 0; i < cur_alt->desc.bNumEndpoints; i++) {
+			ret = hcd->driver->drop_endpoint(hcd, udev,
+					&cur_alt->endpoint[i]);
+			if (ret < 0)
+				goto reset;
+		}
+		/* Add all the endpoints in the new alt setting */
+		for (i = 0; i < new_alt->desc.bNumEndpoints; i++) {
+			ret = hcd->driver->add_endpoint(hcd, udev,
+					&new_alt->endpoint[i]);
+			if (ret < 0)
+				goto reset;
+		}
+	}
 	ret = hcd->driver->check_bandwidth(hcd, udev);
 reset:
 	if (ret < 0)
@@ -1984,6 +2013,7 @@
 #ifdef CONFIG_PM
 	INIT_WORK(&hcd->wakeup_work, hcd_resume_work);
 #endif
+	mutex_init(&hcd->bandwidth_mutex);
 
 	hcd->driver = driver;
 	hcd->product_desc = (driver->product_desc) ? driver->product_desc :
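
The new checks in map_urb_for_dma paths above follow the standard DMA API
rule that every dma_map_single() must be tested with dma_mapping_error()
before the handle is used.  In isolation (buf and len stand in for the URB
fields):

	dma_addr_t handle;

	handle = dma_map_single(hcd->self.controller, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(hcd->self.controller, handle))
		return -EAGAIN;	/* treated as a retryable submit error */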
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index 79782a1..d8b43ae 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -111,6 +111,20 @@
 	u64			rsrc_len;	/* memory/io resource length */
 	unsigned		power_budget;	/* in mA, 0 = no limit */
 
+	/* bandwidth_mutex should be taken before adding or removing
+	 * any bus bandwidth constraints:
+	 *   1. Before adding a configuration for a new device.
+	 *   2. Before removing the configuration to put the device into
+	 *      the addressed state.
+	 *   3. Before selecting a different configuration.
+	 *   4. Before selecting an alternate interface setting.
+	 *
+	 * bandwidth_mutex should be dropped after a successful control message
+	 * to the device, or after resetting the bandwidth following a failed
+	 * attempt.
+	 */
+	struct mutex		bandwidth_mutex;
+
 #define HCD_BUFFER_POOLS	4
 	struct dma_pool		*pool [HCD_BUFFER_POOLS];
 
@@ -290,9 +304,10 @@
 extern void usb_hcd_reset_endpoint(struct usb_device *udev,
 		struct usb_host_endpoint *ep);
 extern void usb_hcd_synchronize_unlinks(struct usb_device *udev);
-extern int usb_hcd_check_bandwidth(struct usb_device *udev,
+extern int usb_hcd_alloc_bandwidth(struct usb_device *udev,
 		struct usb_host_config *new_config,
-		struct usb_interface *new_intf);
+		struct usb_host_interface *old_alt,
+		struct usb_host_interface *new_alt);
 extern int usb_hcd_get_frame_number(struct usb_device *udev);
 
 extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
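
The intended locking pattern around usb_hcd_alloc_bandwidth(), condensed
from the message.c hunks below (error handling trimmed to a sketch):

	mutex_lock(&hcd->bandwidth_mutex);
	ret = usb_hcd_alloc_bandwidth(udev, NULL, cur_alt, new_alt);
	if (ret < 0) {
		mutex_unlock(&hcd->bandwidth_mutex);
		return ret;
	}
	/* ... issue the SET_INTERFACE / SET_CONFIGURATION request; on
	 * failure, give the bandwidth back before unlocking ...
	 */
	mutex_unlock(&hcd->bandwidth_mutex);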
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 0f857e6..06af970 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -45,7 +45,6 @@
 
 	/* buffer for urb ... with extra space in case of babble */
 	char			(*buffer)[8];
-	dma_addr_t		buffer_dma;	/* DMA address for buffer */
 	union {
 		struct usb_hub_status	hub;
 		struct usb_port_status	port;
@@ -61,6 +60,8 @@
 							status change */
 	unsigned long		busy_bits[1];	/* ports being reset or
 							resumed */
+	unsigned long		removed_bits[1]; /* ports with a "removed"
+							device present */
 #if USB_MAXCHILDREN > 31 /* 8*sizeof(unsigned long) - 1 */
 #error event_bits[] is too short!
 #endif
@@ -70,6 +71,7 @@
 
 	unsigned		mA_per_port;	/* current for each child */
 
+	unsigned		init_done:1;
 	unsigned		limited_power:1;
 	unsigned		quiescing:1;
 	unsigned		disconnected:1;
@@ -374,12 +376,13 @@
 {
 	unsigned long	flags;
 
-	/* Suppress autosuspend until khubd runs */
-	atomic_set(&to_usb_interface(hub->intfdev)->pm_usage_cnt, 1);
-
 	spin_lock_irqsave(&hub_event_lock, flags);
 	if (!hub->disconnected && list_empty(&hub->event_list)) {
 		list_add_tail(&hub->event_list, &hub_event_list);
+
+		/* Suppress autosuspend until khubd runs */
+		usb_autopm_get_interface_no_resume(
+				to_usb_interface(hub->intfdev));
 		wake_up(&khubd_wait);
 	}
 	spin_unlock_irqrestore(&hub_event_lock, flags);
@@ -636,8 +639,35 @@
  	kick_khubd(hub);
 }
 
+/**
+ * usb_remove_device - disable a device's port on its parent hub
+ * @udev: device to be disabled and removed
+ * Context: @udev locked, must be able to sleep.
+ *
+ * After @udev's port has been disabled, khubd is notified and it will
+ * see that the device has been disconnected.  When the device is
+ * physically unplugged and something is plugged in, the events will
+ * be received and processed normally.
+ */
+int usb_remove_device(struct usb_device *udev)
+{
+	struct usb_hub *hub;
+	struct usb_interface *intf;
+
+	if (!udev->parent)	/* Can't remove a root hub */
+		return -EINVAL;
+	hub = hdev_to_hub(udev->parent);
+	intf = to_usb_interface(hub->intfdev);
+
+	usb_autopm_get_interface(intf);
+	set_bit(udev->portnum, hub->removed_bits);
+	hub_port_logical_disconnect(hub, udev->portnum);
+	usb_autopm_put_interface(intf);
+	return 0;
+}
+
 enum hub_activation_type {
-	HUB_INIT, HUB_INIT2, HUB_INIT3,
+	HUB_INIT, HUB_INIT2, HUB_INIT3,		/* INITs must come first */
 	HUB_POST_RESET, HUB_RESUME, HUB_RESET_RESUME,
 };
 
@@ -682,8 +712,8 @@
 					msecs_to_jiffies(delay));
 
 			/* Suppress autosuspend until init is done */
-			atomic_set(&to_usb_interface(hub->intfdev)->
-					pm_usage_cnt, 1);
+			usb_autopm_get_interface_no_resume(
+					to_usb_interface(hub->intfdev));
 			return;		/* Continues at init2: below */
 		} else {
 			hub_power_on(hub, true);
@@ -731,6 +761,13 @@
 					USB_PORT_FEAT_C_ENABLE);
 		}
 
+		/* We can forget about a "removed" device when there's a
+		 * physical disconnect or the connect status changes.
+		 */
+		if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
+				(portchange & USB_PORT_STAT_C_CONNECTION))
+			clear_bit(port1, hub->removed_bits);
+
 		if (!udev || udev->state == USB_STATE_NOTATTACHED) {
 			/* Tell khubd to disconnect the device or
 			 * check for a new connection
@@ -783,6 +820,7 @@
 	}
  init3:
 	hub->quiescing = 0;
+	hub->init_done = 1;
 
 	status = usb_submit_urb(hub->urb, GFP_NOIO);
 	if (status < 0)
@@ -792,6 +830,10 @@
 
 	/* Scan all ports that need attention */
 	kick_khubd(hub);
+
+	/* Allow autosuspend if it was suppressed */
+	if (type <= HUB_INIT3)
+		usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
 }
 
 /* Implement the continuations for the delays above */
@@ -819,6 +861,11 @@
 	int i;
 
 	cancel_delayed_work_sync(&hub->init_work);
+	if (!hub->init_done) {
+		hub->init_done = 1;
+		usb_autopm_put_interface_no_suspend(
+				to_usb_interface(hub->intfdev));
+	}
 
 	/* khubd and related activity won't re-trigger */
 	hub->quiescing = 1;
@@ -869,8 +916,7 @@
 	int maxp, ret;
 	char *message = "out of memory";
 
-	hub->buffer = usb_buffer_alloc(hdev, sizeof(*hub->buffer), GFP_KERNEL,
-			&hub->buffer_dma);
+	hub->buffer = kmalloc(sizeof(*hub->buffer), GFP_KERNEL);
 	if (!hub->buffer) {
 		ret = -ENOMEM;
 		goto fail;
@@ -1111,8 +1157,6 @@
 
 	usb_fill_int_urb(hub->urb, hdev, pipe, *hub->buffer, maxp, hub_irq,
 		hub, endpoint->bInterval);
-	hub->urb->transfer_dma = hub->buffer_dma;
-	hub->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 
 	/* maybe cycle the hub leds */
 	if (hub->has_indicators && blinkenlights)
@@ -1144,7 +1188,10 @@
 
 	/* Take the hub off the event list and don't let it be added again */
 	spin_lock_irq(&hub_event_lock);
-	list_del_init(&hub->event_list);
+	if (!list_empty(&hub->event_list)) {
+		list_del_init(&hub->event_list);
+		usb_autopm_put_interface_no_suspend(intf);
+	}
 	hub->disconnected = 1;
 	spin_unlock_irq(&hub_event_lock);
 
@@ -1162,8 +1209,7 @@
 	kfree(hub->port_owners);
 	kfree(hub->descriptor);
 	kfree(hub->status);
-	usb_buffer_free(hub->hdev, sizeof(*hub->buffer), hub->buffer,
-			hub->buffer_dma);
+	kfree(hub->buffer);
 
 	kref_put(&hub->kref, hub_release);
 }
@@ -1630,7 +1676,7 @@
 	if (!udev->bus->is_b_host
 			&& udev->config
 			&& udev->parent == udev->bus->root_hub) {
-		struct usb_otg_descriptor	*desc = 0;
+		struct usb_otg_descriptor	*desc = NULL;
 		struct usb_bus			*bus = udev->bus;
 
 		/* descriptor may appear anywhere in config */
@@ -2123,9 +2169,13 @@
 				USB_DEVICE_REMOTE_WAKEUP, 0,
 				NULL, 0,
 				USB_CTRL_SET_TIMEOUT);
-		if (status)
+		if (status) {
 			dev_dbg(&udev->dev, "won't remote wakeup, status %d\n",
 					status);
+			/* bail if autosuspend is requested */
+			if (msg.event & PM_EVENT_AUTO)
+				return status;
+		}
 	}
 
 	/* see 7.1.7.6 */
@@ -2134,7 +2184,8 @@
 		dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
 				port1, status);
 		/* paranoia:  "should not happen" */
-		(void) usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+		if (udev->do_remote_wakeup)
+			(void) usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
 				USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE,
 				USB_DEVICE_REMOTE_WAKEUP, 0,
 				NULL, 0,
@@ -2965,6 +3016,13 @@
 		usb_disconnect(&hdev->children[port1-1]);
 	clear_bit(port1, hub->change_bits);
 
+	/* We can forget about a "removed" device when there's a physical
+	 * disconnect or the connect status changes.
+	 */
+	if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
+			(portchange & USB_PORT_STAT_C_CONNECTION))
+		clear_bit(port1, hub->removed_bits);
+
 	if (portchange & (USB_PORT_STAT_C_CONNECTION |
 				USB_PORT_STAT_C_ENABLE)) {
 		status = hub_port_debounce(hub, port1);
@@ -2978,8 +3036,11 @@
 		}
 	}
 
-	/* Return now if debouncing failed or nothing is connected */
-	if (!(portstatus & USB_PORT_STAT_CONNECTION)) {
+	/* Return now if debouncing failed or nothing is connected or
+	 * the device was "removed".
+	 */
+	if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
+			test_bit(port1, hub->removed_bits)) {
 
 		/* maybe switch power back on (e.g. root hub was reset) */
 		if ((wHubCharacteristics & HUB_CHAR_LPSM) < 2
@@ -3189,7 +3250,7 @@
 		 * disconnected while waiting for the lock to succeed. */
 		usb_lock_device(hdev);
 		if (unlikely(hub->disconnected))
-			goto loop;
+			goto loop2;
 
 		/* If the hub has died, clean up after it */
 		if (hdev->state == USB_STATE_NOTATTACHED) {
@@ -3338,11 +3399,15 @@
 			}
 		}
 
-loop_autopm:
-		/* Allow autosuspend if we're not going to run again */
-		if (list_empty(&hub->event_list))
-			usb_autopm_enable(intf);
-loop:
+ loop_autopm:
+		/* Balance the usb_autopm_get_interface() above */
+		usb_autopm_put_interface_no_suspend(intf);
+ loop:
+		/* Balance the usb_autopm_get_interface_no_resume() in
+		 * kick_khubd() and allow autosuspend.
+		 */
+		usb_autopm_put_interface(intf);
+ loop2:
 		usb_unlock_device(hdev);
 		kref_put(&hub->kref, hub_release);
 
@@ -3534,6 +3599,7 @@
 {
 	struct usb_device		*parent_hdev = udev->parent;
 	struct usb_hub			*parent_hub;
+	struct usb_hcd			*hcd = bus_to_hcd(udev->bus);
 	struct usb_device_descriptor	descriptor = udev->descriptor;
 	int 				i, ret = 0;
 	int				port1 = udev->portnum;
@@ -3577,6 +3643,16 @@
 	/* Restore the device's previous configuration */
 	if (!udev->actconfig)
 		goto done;
+
+	mutex_lock(&hcd->bandwidth_mutex);
+	ret = usb_hcd_alloc_bandwidth(udev, udev->actconfig, NULL, NULL);
+	if (ret < 0) {
+		dev_warn(&udev->dev,
+				"Busted HC?  Not enough HCD resources for "
+				"old configuration.\n");
+		mutex_unlock(&hcd->bandwidth_mutex);
+		goto re_enumerate;
+	}
 	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
 			USB_REQ_SET_CONFIGURATION, 0,
 			udev->actconfig->desc.bConfigurationValue, 0,
@@ -3585,8 +3661,10 @@
 		dev_err(&udev->dev,
 			"can't restore configuration #%d (error=%d)\n",
 			udev->actconfig->desc.bConfigurationValue, ret);
+		mutex_unlock(&hcd->bandwidth_mutex);
 		goto re_enumerate;
   	}
+	mutex_unlock(&hcd->bandwidth_mutex);
 	usb_set_device_state(udev, USB_STATE_CONFIGURED);
 
 	/* Put interfaces back into the same altsettings as before.
@@ -3596,7 +3674,8 @@
 	 * endpoint state.
 	 */
 	for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
-		struct usb_interface *intf = udev->actconfig->interface[i];
+		struct usb_host_config *config = udev->actconfig;
+		struct usb_interface *intf = config->interface[i];
 		struct usb_interface_descriptor *desc;
 
 		desc = &intf->cur_altsetting->desc;
@@ -3605,6 +3684,17 @@
 			usb_enable_interface(udev, intf, true);
 			ret = 0;
 		} else {
+			/* We've just reset the device, so it will think alt
+			 * setting 0 is installed.  For usb_set_interface() to
+			 * work properly, we need to set the current alternate
+			 * interface setting to 0 (or the first alt setting, if
+			 * the device doesn't have alt setting 0).
+			 */
+			intf->cur_altsetting =
+				usb_find_alt_setting(config, i, 0);
+			if (!intf->cur_altsetting)
+				intf->cur_altsetting =
+					&config->intf_cache[i]->altsetting[0];
 			ret = usb_set_interface(udev, desc->bInterfaceNumber,
 					desc->bAlternateSetting);
 		}
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index e80f1af..1b99484 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -393,13 +393,7 @@
 	if (io->entries <= 0)
 		return io->entries;
 
-	/* If we're running on an xHCI host controller, queue the whole scatter
-	 * gather list with one call to urb_enqueue().  This is only for bulk,
-	 * as that endpoint type does not care how the data gets broken up
-	 * across frames.
-	 */
-	if (usb_pipebulk(pipe) &&
-			bus_to_hcd(dev->bus)->driver->flags & HCD_USB3) {
+	if (dev->bus->sg_tablesize > 0) {
 		io->urbs = kmalloc(sizeof *io->urbs, mem_flags);
 		use_sg = true;
 	} else {
@@ -409,7 +403,7 @@
 	if (!io->urbs)
 		goto nomem;
 
-	urb_flags = URB_NO_INTERRUPT;
+	urb_flags = 0;
 	if (dma)
 		urb_flags |= URB_NO_TRANSFER_DMA_MAP;
 	if (usb_pipein(pipe))
@@ -441,6 +435,7 @@
 		io->urbs[0]->num_sgs = io->entries;
 		io->entries = 1;
 	} else {
+		urb_flags |= URB_NO_INTERRUPT;
 		for_each_sg(sg, sg, io->entries, i) {
 			unsigned len;
 
@@ -1303,6 +1298,7 @@
 {
 	struct usb_interface *iface;
 	struct usb_host_interface *alt;
+	struct usb_hcd *hcd = bus_to_hcd(dev->bus);
 	int ret;
 	int manual = 0;
 	unsigned int epaddr;
@@ -1325,6 +1321,18 @@
 		return -EINVAL;
 	}
 
+	/* Make sure we have enough bandwidth for this alternate interface.
+	 * Remove the current alt setting and add the new alt setting.
+	 */
+	mutex_lock(&hcd->bandwidth_mutex);
+	ret = usb_hcd_alloc_bandwidth(dev, NULL, iface->cur_altsetting, alt);
+	if (ret < 0) {
+		dev_info(&dev->dev, "Not enough bandwidth for altsetting %d\n",
+				alternate);
+		mutex_unlock(&hcd->bandwidth_mutex);
+		return ret;
+	}
+
 	if (dev->quirks & USB_QUIRK_NO_SET_INTF)
 		ret = -EPIPE;
 	else
@@ -1340,8 +1348,13 @@
 			"manual set_interface for iface %d, alt %d\n",
 			interface, alternate);
 		manual = 1;
-	} else if (ret < 0)
+	} else if (ret < 0) {
+		/* Re-instate the old alt setting */
+		usb_hcd_alloc_bandwidth(dev, NULL, alt, iface->cur_altsetting);
+		mutex_unlock(&hcd->bandwidth_mutex);
 		return ret;
+	}
+	mutex_unlock(&hcd->bandwidth_mutex);
 
 	/* FIXME drivers shouldn't need to replicate/bugfix the logic here
 	 * when they implement async or easily-killable versions of this or
@@ -1423,6 +1436,7 @@
 {
 	int			i, retval;
 	struct usb_host_config	*config;
+	struct usb_hcd *hcd = bus_to_hcd(dev->bus);
 
 	if (dev->state == USB_STATE_SUSPENDED)
 		return -EHOSTUNREACH;
@@ -1438,12 +1452,46 @@
 	}
 
 	config = dev->actconfig;
+	retval = 0;
+	mutex_lock(&hcd->bandwidth_mutex);
+	/* Make sure we have enough bandwidth for each alternate setting 0 */
+	for (i = 0; i < config->desc.bNumInterfaces; i++) {
+		struct usb_interface *intf = config->interface[i];
+		struct usb_host_interface *alt;
+
+		alt = usb_altnum_to_altsetting(intf, 0);
+		if (!alt)
+			alt = &intf->altsetting[0];
+		if (alt != intf->cur_altsetting)
+			retval = usb_hcd_alloc_bandwidth(dev, NULL,
+					intf->cur_altsetting, alt);
+		if (retval < 0)
+			break;
+	}
+	/* If not, reinstate the old alternate settings */
+	if (retval < 0) {
+reset_old_alts:
+		for (; i >= 0; i--) {
+			struct usb_interface *intf = config->interface[i];
+			struct usb_host_interface *alt;
+
+			alt = usb_altnum_to_altsetting(intf, 0);
+			if (!alt)
+				alt = &intf->altsetting[0];
+			if (alt != intf->cur_altsetting)
+				usb_hcd_alloc_bandwidth(dev, NULL,
+						alt, intf->cur_altsetting);
+		}
+		mutex_unlock(&hcd->bandwidth_mutex);
+		return retval;
+	}
 	retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
 			USB_REQ_SET_CONFIGURATION, 0,
 			config->desc.bConfigurationValue, 0,
 			NULL, 0, USB_CTRL_SET_TIMEOUT);
 	if (retval < 0)
-		return retval;
+		goto reset_old_alts;
+	mutex_unlock(&hcd->bandwidth_mutex);
 
 	/* re-init hc/hcd interface/endpoint state */
 	for (i = 0; i < config->desc.bNumInterfaces; i++) {
@@ -1585,7 +1633,7 @@
  *
  * See usb_queue_reset_device() for more details
  */
-void __usb_queue_reset_device(struct work_struct *ws)
+static void __usb_queue_reset_device(struct work_struct *ws)
 {
 	int rc;
 	struct usb_interface *iface =
@@ -1652,6 +1700,7 @@
 	int i, ret;
 	struct usb_host_config *cp = NULL;
 	struct usb_interface **new_interfaces = NULL;
+	struct usb_hcd *hcd = bus_to_hcd(dev->bus);
 	int n, nintf;
 
 	if (dev->authorized == 0 || configuration == -1)
@@ -1721,12 +1770,11 @@
 	 * host controller will not allow submissions to dropped endpoints.  If
 	 * this call fails, the device state is unchanged.
 	 */
-	if (cp)
-		ret = usb_hcd_check_bandwidth(dev, cp, NULL);
-	else
-		ret = usb_hcd_check_bandwidth(dev, NULL, NULL);
+	mutex_lock(&hcd->bandwidth_mutex);
+	ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
 	if (ret < 0) {
 		usb_autosuspend_device(dev);
+		mutex_unlock(&hcd->bandwidth_mutex);
 		goto free_interfaces;
 	}
 
@@ -1752,10 +1800,12 @@
 	dev->actconfig = cp;
 	if (!cp) {
 		usb_set_device_state(dev, USB_STATE_ADDRESS);
-		usb_hcd_check_bandwidth(dev, NULL, NULL);
+		usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
 		usb_autosuspend_device(dev);
+		mutex_unlock(&hcd->bandwidth_mutex);
 		goto free_interfaces;
 	}
+	mutex_unlock(&hcd->bandwidth_mutex);
 	usb_set_device_state(dev, USB_STATE_CONFIGURED);
 
 	/* Initialize the new interface structures and the
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 7ec3041..1547700 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -139,6 +139,16 @@
 static DEVICE_ATTR(devnum, S_IRUGO, show_devnum, NULL);
 
 static ssize_t
+show_devpath(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct usb_device *udev;
+
+	udev = to_usb_device(dev);
+	return sprintf(buf, "%s\n", udev->devpath);
+}
+static DEVICE_ATTR(devpath, S_IRUGO, show_devpath, NULL);
+
+static ssize_t
 show_version(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct usb_device *udev;
@@ -317,7 +327,6 @@
 
 static const char on_string[] = "on";
 static const char auto_string[] = "auto";
-static const char suspend_string[] = "suspend";
 
 static ssize_t
 show_level(struct device *dev, struct device_attribute *attr, char *buf)
@@ -325,13 +334,8 @@
 	struct usb_device *udev = to_usb_device(dev);
 	const char *p = auto_string;
 
-	if (udev->state == USB_STATE_SUSPENDED) {
-		if (udev->autoresume_disabled)
-			p = suspend_string;
-	} else {
-		if (udev->autosuspend_disabled)
-			p = on_string;
-	}
+	if (udev->state != USB_STATE_SUSPENDED && udev->autosuspend_disabled)
+		p = on_string;
 	return sprintf(buf, "%s\n", p);
 }
 
@@ -343,7 +347,7 @@
 	int len = count;
 	char *cp;
 	int rc = 0;
-	int old_autosuspend_disabled, old_autoresume_disabled;
+	int old_autosuspend_disabled;
 
 	cp = memchr(buf, '\n', count);
 	if (cp)
@@ -351,7 +355,6 @@
 
 	usb_lock_device(udev);
 	old_autosuspend_disabled = udev->autosuspend_disabled;
-	old_autoresume_disabled = udev->autoresume_disabled;
 
 	/* Setting the flags without calling usb_pm_lock is a subject to
 	 * races, but who cares...
@@ -359,28 +362,18 @@
 	if (len == sizeof on_string - 1 &&
 			strncmp(buf, on_string, len) == 0) {
 		udev->autosuspend_disabled = 1;
-		udev->autoresume_disabled = 0;
 		rc = usb_external_resume_device(udev, PMSG_USER_RESUME);
 
 	} else if (len == sizeof auto_string - 1 &&
 			strncmp(buf, auto_string, len) == 0) {
 		udev->autosuspend_disabled = 0;
-		udev->autoresume_disabled = 0;
 		rc = usb_external_resume_device(udev, PMSG_USER_RESUME);
 
-	} else if (len == sizeof suspend_string - 1 &&
-			strncmp(buf, suspend_string, len) == 0) {
-		udev->autosuspend_disabled = 0;
-		udev->autoresume_disabled = 1;
-		rc = usb_external_suspend_device(udev, PMSG_USER_SUSPEND);
-
 	} else
 		rc = -EINVAL;
 
-	if (rc) {
+	if (rc)
 		udev->autosuspend_disabled = old_autosuspend_disabled;
-		udev->autoresume_disabled = old_autoresume_disabled;
-	}
 	usb_unlock_device(udev);
 	return (rc < 0 ? rc : count);
 }
@@ -508,6 +501,28 @@
 static DEVICE_ATTR(authorized, 0644,
 	    usb_dev_authorized_show, usb_dev_authorized_store);
 
+/* "Safely remove a device" */
+static ssize_t usb_remove_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct usb_device *udev = to_usb_device(dev);
+	int rc = 0;
+
+	usb_lock_device(udev);
+	if (udev->state != USB_STATE_NOTATTACHED) {
+
+		/* To avoid races, first unconfigure and then remove */
+		usb_set_configuration(udev, -1);
+		rc = usb_remove_device(udev);
+	}
+	if (rc == 0)
+		rc = count;
+	usb_unlock_device(udev);
+	return rc;
+}
+static DEVICE_ATTR(remove, 0200, NULL, usb_remove_store);
+
 
 static struct attribute *dev_attrs[] = {
 	/* current configuration's attributes */
@@ -516,8 +531,8 @@
 	&dev_attr_bConfigurationValue.attr,
 	&dev_attr_bmAttributes.attr,
 	&dev_attr_bMaxPower.attr,
-	&dev_attr_urbnum.attr,
 	/* device attributes */
+	&dev_attr_urbnum.attr,
 	&dev_attr_idVendor.attr,
 	&dev_attr_idProduct.attr,
 	&dev_attr_bcdDevice.attr,
@@ -529,10 +544,12 @@
 	&dev_attr_speed.attr,
 	&dev_attr_busnum.attr,
 	&dev_attr_devnum.attr,
+	&dev_attr_devpath.attr,
 	&dev_attr_version.attr,
 	&dev_attr_maxchild.attr,
 	&dev_attr_quirks.attr,
 	&dev_attr_authorized.attr,
+	&dev_attr_remove.attr,
 	NULL,
 };
 static struct attribute_group dev_attr_grp = {
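
The "remove" attribute added above gives userspace a supported way to
"safely remove" a device: any write unconfigures the device and then asks
the hub driver to disable its upstream port.  A minimal userspace sketch,
assuming a hypothetical device path (substitute a real one):

#include <stdio.h>

int main(void)
{
	/* Hypothetical device path; pick the device you want to remove. */
	FILE *f = fopen("/sys/bus/usb/devices/1-1/remove", "w");

	if (!f) {
		perror("open remove attribute");
		return 1;
	}
	/* The written value is not parsed; the write itself triggers
	 * usb_remove_store() above. */
	fputs("1\n", f);
	return fclose(f) != 0;
}
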
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 0885d4a..e7cae13 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -429,8 +429,16 @@
 	case USB_ENDPOINT_XFER_ISOC:
 	case USB_ENDPOINT_XFER_INT:
 		/* too small? */
-		if (urb->interval <= 0)
-			return -EINVAL;
+		switch (dev->speed) {
+		case USB_SPEED_VARIABLE:
+			if (urb->interval < 6)
+				return -EINVAL;
+			break;
+		default:
+			if (urb->interval <= 0)
+				return -EINVAL;
+			break;
+		}
 		/* too big? */
 		switch (dev->speed) {
 		case USB_SPEED_SUPER:	/* units are 125us */
@@ -438,6 +446,10 @@
 			if (urb->interval > (1 << 15))
 				return -EINVAL;
 			max = 1 << 15;
+			break;
+		case USB_SPEED_VARIABLE:
+			if (urb->interval > 16)
+				return -EINVAL;
+			break;
 		case USB_SPEED_HIGH:	/* units are microframes */
 			/* NOTE usb handles 2^15 */
 			if (urb->interval > (1024 * 8))
@@ -461,8 +473,10 @@
 		default:
 			return -EINVAL;
 		}
-		/* Round down to a power of 2, no more than max */
-		urb->interval = min(max, 1 << ilog2(urb->interval));
+		if (dev->speed != USB_SPEED_VARIABLE) {
+			/* Round down to a power of 2, no more than max */
+			urb->interval = min(max, 1 << ilog2(urb->interval));
+		}
 	}
 
 	return usb_hcd_submit_urb(urb, mem_flags);
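
For reference, the interval policy this hunk implements can be sketched in
isolation.  This is a hedged, standalone approximation: the speed names are
stand-ins for the kernel's enum, the limits are illustrative, and over-limit
intervals are simply rejected rather than clamped:

#include <stdio.h>

enum speed { SPEED_VARIABLE, SPEED_SUPER, SPEED_HIGH, SPEED_FULL };

static int ilog2_u32(unsigned int v)	/* floor(log2(v)), v > 0 */
{
	int r = -1;

	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

/* Returns the effective interval, or -1 for an invalid request. */
static int check_interval(enum speed sp, int interval)
{
	int max;

	switch (sp) {
	case SPEED_VARIABLE:	/* wireless: 6..16, used as-is */
		return (interval < 6 || interval > 16) ? -1 : interval;
	case SPEED_SUPER:	/* units of 125us, up to 2^15 */
		max = 1 << 15;
		break;
	case SPEED_HIGH:	/* microframes */
		max = 1024 * 8;
		break;
	default:		/* illustrative limit for other speeds */
		max = 1024;
		break;
	}
	if (interval <= 0 || interval > max)
		return -1;
	/* round down to a power of two, no more than max */
	return 1 << ilog2_u32(interval);
}

int main(void)
{
	printf("%d\n", check_interval(SPEED_HIGH, 12));		/* prints 8 */
	printf("%d\n", check_interval(SPEED_VARIABLE, 5));	/* prints -1 */
	return 0;
}
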
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index b1b85ab..4e2c6df 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -64,6 +64,43 @@
 
 
 /**
+ * usb_find_alt_setting() - Given a configuration, find the alternate setting
+ * for the given interface.
+ * @config: the configuration to search (not necessarily the current config)
+ * @iface_num: interface number to search in
+ * @alt_num: alternate interface setting number to search for
+ *
+ * Search the configuration's interface cache for the given alt setting.
+ */
+struct usb_host_interface *usb_find_alt_setting(
+		struct usb_host_config *config,
+		unsigned int iface_num,
+		unsigned int alt_num)
+{
+	struct usb_interface_cache *intf_cache = NULL;
+	int i;
+
+	for (i = 0; i < config->desc.bNumInterfaces; i++) {
+		if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber
+				== iface_num) {
+			intf_cache = config->intf_cache[i];
+			break;
+		}
+	}
+	if (!intf_cache)
+		return NULL;
+	for (i = 0; i < intf_cache->num_altsetting; i++)
+		if (intf_cache->altsetting[i].desc.bAlternateSetting == alt_num)
+			return &intf_cache->altsetting[i];
+
+	printk(KERN_DEBUG "Did not find alt setting %u for intf %u, "
+			"config %u\n", alt_num, iface_num,
+			config->desc.bConfigurationValue);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(usb_find_alt_setting);
+
+/**
  * usb_ifnum_to_if - get the interface object with a given interface number
  * @dev: the device whose current configuration is considered
  * @ifnum: the desired interface
@@ -130,24 +167,17 @@
 }
 EXPORT_SYMBOL_GPL(usb_altnum_to_altsetting);
 
-struct find_interface_arg {
-	int minor;
-	struct usb_interface *interface;
-};
-
 static int __find_interface(struct device *dev, void *data)
 {
-	struct find_interface_arg *arg = data;
+	int *minor = data;
 	struct usb_interface *intf;
 
 	if (!is_usb_interface(dev))
 		return 0;
 
 	intf = to_usb_interface(dev);
-	if (intf->minor != -1 && intf->minor == arg->minor) {
-		arg->interface = intf;
+	if (intf->minor != -1 && intf->minor == *minor)
 		return 1;
-	}
 	return 0;
 }
 
@@ -156,21 +186,20 @@
  * @drv: the driver whose current configuration is considered
  * @minor: the minor number of the desired device
  *
- * This walks the driver device list and returns a pointer to the interface
+ * This walks the bus device list and returns a pointer to the interface
  * with the matching minor.  Note, this only works for devices that share the
  * USB major number.
  */
 struct usb_interface *usb_find_interface(struct usb_driver *drv, int minor)
 {
-	struct find_interface_arg argb;
-	int retval;
+	struct device *dev;
 
-	argb.minor = minor;
-	argb.interface = NULL;
-	/* eat the error, it will be in argb.interface */
-	retval = driver_for_each_device(&drv->drvwrap.driver, NULL, &argb,
-					__find_interface);
-	return argb.interface;
+	dev = bus_find_device(&usb_bus_type, NULL, &minor, __find_interface);
+
+	/* Drop reference count from bus_find_device */
+	put_device(dev);
+
+	return dev ? to_usb_interface(dev) : NULL;
 }
 EXPORT_SYMBOL_GPL(usb_find_interface);
 
@@ -1038,7 +1067,7 @@
 struct dentry *usb_debug_root;
 EXPORT_SYMBOL_GPL(usb_debug_root);
 
-struct dentry *usb_debug_devices;
+static struct dentry *usb_debug_devices;
 
 static int usb_debugfs_init(void)
 {
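
A hedged sketch of a caller for the new usb_find_alt_setting() helper
exported above; the interface and altsetting numbers are placeholders, and
the fragment assumes ordinary kernel driver context:

#include <linux/usb.h>

/* Return the endpoint count of interface 0, altsetting 1 in the
 * device's active configuration, or -ENODEV if it isn't cached. */
static int example_alt_endpoints(struct usb_device *udev)
{
	struct usb_host_interface *alt;

	alt = usb_find_alt_setting(udev->actconfig, 0, 1);
	if (!alt)
		return -ENODEV;
	return alt->desc.bNumEndpoints;
}
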
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 9a8b15e..4c36c7f 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -24,6 +24,7 @@
 extern int usb_deauthorize_device(struct usb_device *);
 extern int usb_authorize_device(struct usb_device *);
 extern void usb_detect_quirks(struct usb_device *udev);
+extern int usb_remove_device(struct usb_device *udev);
 
 extern int usb_get_device_descriptor(struct usb_device *dev,
 		unsigned int size);
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index a18e3c5..ee41120 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -732,6 +732,24 @@
 	  behavior of USB Mass Storage hosts.  Not needed for
 	  normal operation.
 
+config USB_MASS_STORAGE
+	tristate "Mass Storage Gadget"
+	depends on BLOCK
+	help
+	  The Mass Storage Gadget acts as a USB Mass Storage disk drive.
+	  As its storage repository it can use a regular file or a block
+	  device (in much the same way as the "loop" device driver),
+	  specified as a module parameter or sysfs option.
+
+	  This is heavily based on the File-backed Storage Gadget and in
+	  most cases you will want to use FSG instead.  This gadget is
+	  mostly here to test the functionality of the Mass Storage
+	  Function, which may be used with the composite framework.
+
+	  Say "y" to link the driver statically, or "m" to build
+	  a dynamically linked module called "g_mass_storage".  If unsure,
+	  consider File-backed Storage Gadget.
+
 config USB_G_SERIAL
 	tristate "Serial Gadget (with CDC ACM and CDC OBEX support)"
 	help
@@ -794,6 +812,48 @@
 	  Say "y" to link the driver statically, or "m" to build a
 	  dynamically linked module.
 
+config USB_G_MULTI
+	tristate "Multifunction Composite Gadget (EXPERIMENTAL)"
+	depends on BLOCK && NET
+	help
+	  The Multifunction Composite Gadget provides Ethernet (RNDIS
+	  and/or CDC Ethernet), mass storage and ACM serial link
+	  interfaces.
+
+	  You will be asked to choose which of the two configurations is
+	  to be available in the gadget.  At least one configuration must
+	  be chosen to make the gadget usable.  Selecting more than one
+	  configuration will prevent Windows from automatically detecting
+	  the gadget as a composite gadget, so an INF file will be needed to
+	  use the gadget.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "g_multi".
+
+config USB_G_MULTI_RNDIS
+	bool "RNDIS + CDC Serial + Storage configuration"
+	depends on USB_G_MULTI
+	default y
+	help
+	  This option enables a configuration with RNDIS, CDC Serial and
+	  Mass Storage functions available in the Multifunction Composite
+	  Gadget.  This is the configuration dedicated for Windows since RNDIS
+	  is Microsoft's protocol.
+
+	  If unsure, say "y".
+
+config USB_G_MULTI_CDC
+	bool "CDC Ethernet + CDC Serial + Storage configuration"
+	depends on USB_G_MULTI
+	default n
+	help
+	  This option enables a configuration with CDC Ethernet (ECM), CDC
+	  Serial and Mass Storage functions available in the Multifunction
+	  Composite Gadget.
+
+	  If unsure, say "y".
+
+
 # put drivers that need isochronous transfer support (for audio
 # or video class gadget drivers), or specific hardware, here.
 
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 9d7b87c..2e2c047 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -39,16 +39,20 @@
 g_midi-objs			:= gmidi.o
 gadgetfs-objs			:= inode.o
 g_file_storage-objs		:= file_storage.o
+g_mass_storage-objs		:= mass_storage.o
 g_printer-objs			:= printer.o
 g_cdc-objs			:= cdc2.o
+g_multi-objs			:= multi.o
 
 obj-$(CONFIG_USB_ZERO)		+= g_zero.o
 obj-$(CONFIG_USB_AUDIO)		+= g_audio.o
 obj-$(CONFIG_USB_ETH)		+= g_ether.o
 obj-$(CONFIG_USB_GADGETFS)	+= gadgetfs.o
 obj-$(CONFIG_USB_FILE_STORAGE)	+= g_file_storage.o
+obj-$(CONFIG_USB_MASS_STORAGE)	+= g_mass_storage.o
 obj-$(CONFIG_USB_G_SERIAL)	+= g_serial.o
 obj-$(CONFIG_USB_G_PRINTER)	+= g_printer.o
 obj-$(CONFIG_USB_MIDI_GADGET)	+= g_midi.o
 obj-$(CONFIG_USB_CDC_COMPOSITE) += g_cdc.o
+obj-$(CONFIG_USB_G_MULTI)	+= g_multi.o
 
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 66450a1..043e04d 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -892,7 +892,7 @@
 
 			txvc |= AT91_UDP_TXVC_PUON;
 			at91_udp_write(udc, AT91_UDP_TXVC, txvc);
-		} else if (cpu_is_at91sam9261()) {
+		} else if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) {
 			u32	usbpucr;
 
 			usbpucr = at91_sys_read(AT91_MATRIX_USBPUCR);
@@ -910,7 +910,7 @@
 
 			txvc &= ~AT91_UDP_TXVC_PUON;
 			at91_udp_write(udc, AT91_UDP_TXVC, txvc);
-		} else if (cpu_is_at91sam9261()) {
+		} else if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) {
 			u32	usbpucr;
 
 			usbpucr = at91_sys_read(AT91_MATRIX_USBPUCR);
@@ -1692,7 +1692,7 @@
 		udc->ep[3].maxpacket = 64;
 		udc->ep[4].maxpacket = 512;
 		udc->ep[5].maxpacket = 512;
-	} else if (cpu_is_at91sam9261()) {
+	} else if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) {
 		udc->ep[3].maxpacket = 64;
 	} else if (cpu_is_at91sam9263()) {
 		udc->ep[0].maxpacket = 64;
diff --git a/drivers/usb/gadget/audio.c b/drivers/usb/gadget/audio.c
index a3a0f4a..58f2203 100644
--- a/drivers/usb/gadget/audio.c
+++ b/drivers/usb/gadget/audio.c
@@ -89,120 +89,6 @@
 
 /*-------------------------------------------------------------------------*/
 
-/**
- * Handle USB audio endpoint set/get command in setup class request
- */
-
-static int audio_set_endpoint_req(struct usb_configuration *c,
-		const struct usb_ctrlrequest *ctrl)
-{
-	struct usb_composite_dev *cdev = c->cdev;
-	int			value = -EOPNOTSUPP;
-	u16			ep = le16_to_cpu(ctrl->wIndex);
-	u16			len = le16_to_cpu(ctrl->wLength);
-	u16			w_value = le16_to_cpu(ctrl->wValue);
-
-	DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
-			ctrl->bRequest, w_value, len, ep);
-
-	switch (ctrl->bRequest) {
-	case UAC_SET_CUR:
-		value = 0;
-		break;
-
-	case UAC_SET_MIN:
-		break;
-
-	case UAC_SET_MAX:
-		break;
-
-	case UAC_SET_RES:
-		break;
-
-	case UAC_SET_MEM:
-		break;
-
-	default:
-		break;
-	}
-
-	return value;
-}
-
-static int audio_get_endpoint_req(struct usb_configuration *c,
-		const struct usb_ctrlrequest *ctrl)
-{
-	struct usb_composite_dev *cdev = c->cdev;
-	int value = -EOPNOTSUPP;
-	u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
-	u16 len = le16_to_cpu(ctrl->wLength);
-	u16 w_value = le16_to_cpu(ctrl->wValue);
-
-	DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
-			ctrl->bRequest, w_value, len, ep);
-
-	switch (ctrl->bRequest) {
-	case UAC_GET_CUR:
-	case UAC_GET_MIN:
-	case UAC_GET_MAX:
-	case UAC_GET_RES:
-		value = 3;
-		break;
-	case UAC_GET_MEM:
-		break;
-	default:
-		break;
-	}
-
-	return value;
-}
-
-static int
-audio_setup(struct usb_configuration *c, const struct usb_ctrlrequest *ctrl)
-{
-	struct usb_composite_dev *cdev = c->cdev;
-	struct usb_request *req = cdev->req;
-	int value = -EOPNOTSUPP;
-	u16 w_index = le16_to_cpu(ctrl->wIndex);
-	u16 w_value = le16_to_cpu(ctrl->wValue);
-	u16 w_length = le16_to_cpu(ctrl->wLength);
-
-	/* composite driver infrastructure handles everything except
-	 * Audio class messages; interface activation uses set_alt().
-	 */
-	switch (ctrl->bRequestType) {
-	case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
-		value = audio_set_endpoint_req(c, ctrl);
-		break;
-
-	case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
-		value = audio_get_endpoint_req(c, ctrl);
-		break;
-
-	default:
-		ERROR(cdev, "Invalid control req%02x.%02x v%04x i%04x l%d\n",
-			ctrl->bRequestType, ctrl->bRequest,
-			w_value, w_index, w_length);
-	}
-
-	/* respond with data transfer or status phase? */
-	if (value >= 0) {
-		DBG(cdev, "Audio req%02x.%02x v%04x i%04x l%d\n",
-			ctrl->bRequestType, ctrl->bRequest,
-			w_value, w_index, w_length);
-		req->zero = 0;
-		req->length = value;
-		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
-		if (value < 0)
-			ERROR(cdev, "Audio response on err %d\n", value);
-	}
-
-	/* device either stalls (value < 0) or reports success */
-	return value;
-}
-
-/*-------------------------------------------------------------------------*/
-
 static int __init audio_do_config(struct usb_configuration *c)
 {
 	/* FIXME alloc iConfiguration string, set it in c->strings */
@@ -220,7 +106,6 @@
 static struct usb_configuration audio_config_driver = {
 	.label			= DRIVER_DESC,
 	.bind			= audio_do_config,
-	.setup			= audio_setup,
 	.bConfigurationValue	= 1,
 	/* .iConfiguration = DYNAMIC */
 	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index d05397e..09289bb 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -373,6 +373,8 @@
 	list_for_each_entry(f, &cdev->config->functions, list) {
 		if (f->disable)
 			f->disable(f);
+
+		bitmap_zero(f->endpoints, 32);
 	}
 	cdev->config = NULL;
 }
@@ -418,10 +420,35 @@
 	/* Initialize all interfaces by setting them to altsetting zero. */
 	for (tmp = 0; tmp < MAX_CONFIG_INTERFACES; tmp++) {
 		struct usb_function	*f = c->interface[tmp];
+		struct usb_descriptor_header **descriptors;
 
 		if (!f)
 			break;
 
+		/*
+		 * Record which endpoints are used by the function. This is used
+		 * to dispatch control requests targeted at that endpoint to the
+		 * function's setup callback instead of the current
+		 * configuration's setup callback.
+		 */
+		if (gadget->speed == USB_SPEED_HIGH)
+			descriptors = f->hs_descriptors;
+		else
+			descriptors = f->descriptors;
+
+		for (; *descriptors; ++descriptors) {
+			struct usb_endpoint_descriptor *ep;
+			int addr;
+
+			if ((*descriptors)->bDescriptorType != USB_DT_ENDPOINT)
+				continue;
+
+			ep = (struct usb_endpoint_descriptor *)*descriptors;
+			addr = ((ep->bEndpointAddress & 0x80) >> 3)
+			     |  (ep->bEndpointAddress & 0x0f);
+			set_bit(addr, f->endpoints);
+		}
+
 		result = f->set_alt(f, tmp, 0);
 		if (result < 0) {
 			DBG(cdev, "interface %d (%s/%p) alt 0 --> %d\n",
@@ -688,6 +715,7 @@
 	u16				w_value = le16_to_cpu(ctrl->wValue);
 	u16				w_length = le16_to_cpu(ctrl->wLength);
 	struct usb_function		*f = NULL;
+	u8				endp;
 
 	/* partial re-init of the response message; the function or the
 	 * gadget might need to intercept e.g. a control-OUT completion
@@ -800,23 +828,33 @@
 			ctrl->bRequestType, ctrl->bRequest,
 			w_value, w_index, w_length);
 
-		/* functions always handle their interfaces ... punt other
-		 * recipients (endpoint, other, WUSB, ...) to the current
+		/* functions always handle their interfaces and endpoints...
+		 * punt other recipients (other, WUSB, ...) to the current
 		 * configuration code.
 		 *
 		 * REVISIT it could make sense to let the composite device
 		 * take such requests too, if that's ever needed:  to work
 		 * in config 0, etc.
 		 */
-		if ((ctrl->bRequestType & USB_RECIP_MASK)
-				== USB_RECIP_INTERFACE) {
+		switch (ctrl->bRequestType & USB_RECIP_MASK) {
+		case USB_RECIP_INTERFACE:
 			f = cdev->config->interface[intf];
-			if (f && f->setup)
-				value = f->setup(f, ctrl);
-			else
+			break;
+
+		case USB_RECIP_ENDPOINT:
+			endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f);
+			list_for_each_entry(f, &cdev->config->functions, list) {
+				if (test_bit(endp, f->endpoints))
+					break;
+			}
+			if (&f->list == &cdev->config->functions)
 				f = NULL;
+			break;
 		}
-		if (value < 0 && !f) {
+
+		if (f && f->setup)
+			value = f->setup(f, ctrl);
+		else {
 			struct usb_configuration	*c;
 
 			c = cdev->config;
@@ -1054,7 +1092,8 @@
 	.speed		= USB_SPEED_HIGH,
 
 	.bind		= composite_bind,
-	.unbind		= __exit_p(composite_unbind),
+	/* .unbind		= __exit_p(composite_unbind), */
+	.unbind		= composite_unbind,
 
 	.setup		= composite_setup,
 	.disconnect	= composite_disconnect,
@@ -1103,7 +1142,7 @@
  * This function is used to unregister drivers using the composite
  * driver framework.
  */
-void __exit usb_composite_unregister(struct usb_composite_driver *driver)
+void /* __exit */ usb_composite_unregister(struct usb_composite_driver *driver)
 {
 	if (composite != driver)
 		return;
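
The endpoint dispatch added above keys on a 5-bit index derived from the
endpoint address: the direction bit (0x80) is shifted down so IN endpoints
land in bits 16..31 of the function's endpoint bitmap and OUT endpoints in
bits 0..15.  A standalone sketch of the encoding:

#include <stdio.h>

static unsigned int ep_bitmap_index(unsigned char bEndpointAddress)
{
	return ((bEndpointAddress & 0x80) >> 3) | (bEndpointAddress & 0x0f);
}

int main(void)
{
	printf("EP1 OUT -> bit %u\n", ep_bitmap_index(0x01));	/* 1 */
	printf("EP1 IN  -> bit %u\n", ep_bitmap_index(0x81));	/* 17 */
	return 0;
}
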
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 167cb2a..141372b 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -25,6 +25,14 @@
 #include <linux/kernel.h>
 #include <linux/utsname.h>
 
+
+#if defined USB_ETH_RNDIS
+#  undef USB_ETH_RNDIS
+#endif
+#ifdef CONFIG_USB_ETH_RNDIS
+#  define USB_ETH_RNDIS y
+#endif
+#ifdef CONFIG_USB_ETH_EEM
+#  define USB_ETH_EEM y
+#endif
+
 #include "u_ether.h"
 
 
@@ -66,7 +74,7 @@
 #define DRIVER_DESC		"Ethernet Gadget"
 #define DRIVER_VERSION		"Memorial Day 2008"
 
-#ifdef CONFIG_USB_ETH_RNDIS
+#ifdef USB_ETH_RNDIS
 #define PREFIX			"RNDIS/"
 #else
 #define PREFIX			""
@@ -87,7 +95,7 @@
 
 static inline bool has_rndis(void)
 {
-#ifdef	CONFIG_USB_ETH_RNDIS
+#ifdef	USB_ETH_RNDIS
 	return true;
 #else
 	return false;
@@ -110,7 +118,7 @@
 
 #include "f_ecm.c"
 #include "f_subset.c"
-#ifdef	CONFIG_USB_ETH_RNDIS
+#ifdef	USB_ETH_RNDIS
 #include "f_rndis.c"
 #include "rndis.c"
 #endif
@@ -251,7 +259,7 @@
 
 /*-------------------------------------------------------------------------*/
 
-#ifdef CONFIG_USB_ETH_EEM
+#ifdef USB_ETH_EEM
 static int use_eem = 1;
 #else
 static int use_eem;
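
The CONFIG_USB_ETH_RNDIS/USB_ETH_RNDIS indirection above (mirrored for EEM)
exists so that another gadget (multi.c) can include this file's function
code while forcing the feature on or off itself.  A standalone sketch of
the idiom, with FEATURE standing in for the real symbols:

#include <stdio.h>

#define CONFIG_FEATURE 1	/* stand-in for the Kconfig symbol */

#if defined FEATURE		/* an includer may have set it already */
#  undef FEATURE
#endif
#ifdef CONFIG_FEATURE
#  define FEATURE y		/* local alias, as in the hunk above */
#endif

int main(void)
{
#ifdef FEATURE
	puts("feature compiled in");
#else
	puts("feature compiled out");
#endif
	return 0;
}
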
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index 4e36578..d10353d 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -4,6 +4,8 @@
  * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
  * Copyright (C) 2008 by David Brownell
  * Copyright (C) 2008 by Nokia Corporation
+ * Copyright (C) 2009 by Samsung Electronics
+ * Author: Michal Nazarewicz (m.nazarewicz@samsung.com)
  *
  * This software is distributed under the terms of the GNU General
  * Public License ("GPL") as published by the Free Software Foundation,
@@ -99,6 +101,20 @@
 
 /* interface and class descriptors: */
 
+static struct usb_interface_assoc_descriptor
+acm_iad_descriptor = {
+	.bLength =		sizeof acm_iad_descriptor,
+	.bDescriptorType =	USB_DT_INTERFACE_ASSOCIATION,
+
+	/* .bFirstInterface =	DYNAMIC, */
+	.bInterfaceCount = 	2,	// control + data
+	.bFunctionClass =	USB_CLASS_COMM,
+	.bFunctionSubClass =	USB_CDC_SUBCLASS_ACM,
+	.bFunctionProtocol =	USB_CDC_PROTO_NONE,
+	/* .iFunction =		DYNAMIC */
+};
+
+
 static struct usb_interface_descriptor acm_control_interface_desc __initdata = {
 	.bLength =		USB_DT_INTERFACE_SIZE,
 	.bDescriptorType =	USB_DT_INTERFACE,
@@ -178,6 +194,7 @@
 };
 
 static struct usb_descriptor_header *acm_fs_function[] __initdata = {
+	(struct usb_descriptor_header *) &acm_iad_descriptor,
 	(struct usb_descriptor_header *) &acm_control_interface_desc,
 	(struct usb_descriptor_header *) &acm_header_desc,
 	(struct usb_descriptor_header *) &acm_call_mgmt_descriptor,
@@ -216,6 +233,7 @@
 };
 
 static struct usb_descriptor_header *acm_hs_function[] __initdata = {
+	(struct usb_descriptor_header *) &acm_iad_descriptor,
 	(struct usb_descriptor_header *) &acm_control_interface_desc,
 	(struct usb_descriptor_header *) &acm_header_desc,
 	(struct usb_descriptor_header *) &acm_call_mgmt_descriptor,
@@ -232,11 +250,13 @@
 
 #define ACM_CTRL_IDX	0
 #define ACM_DATA_IDX	1
+#define ACM_IAD_IDX	2
 
 /* static strings, in UTF-8 */
 static struct usb_string acm_string_defs[] = {
 	[ACM_CTRL_IDX].s = "CDC Abstract Control Model (ACM)",
 	[ACM_DATA_IDX].s = "CDC ACM Data",
+	[ACM_IAD_IDX].s = "CDC Serial",
 	{  /* ZEROES END LIST */ },
 };
 
@@ -563,6 +583,7 @@
 	if (status < 0)
 		goto fail;
 	acm->ctrl_id = status;
+	acm_iad_descriptor.bFirstInterface = status;
 
 	acm_control_interface_desc.bInterfaceNumber = status;
 	acm_union_desc .bMasterInterface0 = status;
@@ -732,6 +753,13 @@
 		acm_string_defs[ACM_DATA_IDX].id = status;
 
 		acm_data_interface_desc.iInterface = status;
+
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		acm_string_defs[ACM_IAD_IDX].id = status;
+
+		acm_iad_descriptor.iFunction = status;
 	}
 
 	/* allocate and initialize one new instance */
diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c
index 98e9bb9..c43c89f 100644
--- a/drivers/usb/gadget/f_audio.c
+++ b/drivers/usb/gadget/f_audio.c
@@ -445,6 +445,70 @@
 	return len;
 }
 
+static int audio_set_endpoint_req(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int			value = -EOPNOTSUPP;
+	u16			ep = le16_to_cpu(ctrl->wIndex);
+	u16			len = le16_to_cpu(ctrl->wLength);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+
+	DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+			ctrl->bRequest, w_value, len, ep);
+
+	switch (ctrl->bRequest) {
+	case UAC_SET_CUR:
+		value = 0;
+		break;
+
+	case UAC_SET_MIN:
+		break;
+
+	case UAC_SET_MAX:
+		break;
+
+	case UAC_SET_RES:
+		break;
+
+	case UAC_SET_MEM:
+		break;
+
+	default:
+		break;
+	}
+
+	return value;
+}
+
+static int audio_get_endpoint_req(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int value = -EOPNOTSUPP;
+	u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+	u16 len = le16_to_cpu(ctrl->wLength);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+
+	DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+			ctrl->bRequest, w_value, len, ep);
+
+	switch (ctrl->bRequest) {
+	case UAC_GET_CUR:
+	case UAC_GET_MIN:
+	case UAC_GET_MAX:
+	case UAC_GET_RES:
+		value = 3;
+		break;
+	case UAC_GET_MEM:
+		break;
+	default:
+		break;
+	}
+
+	return value;
+}
+
 static int
 f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
 {
@@ -455,8 +519,8 @@
 	u16			w_value = le16_to_cpu(ctrl->wValue);
 	u16			w_length = le16_to_cpu(ctrl->wLength);
 
-	/* composite driver infrastructure handles everything except
-	 * Audio class messages; interface activation uses set_alt().
+	/* composite driver infrastructure handles everything; interface
+	 * activation uses set_alt().
 	 */
 	switch (ctrl->bRequestType) {
 	case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE:
@@ -467,6 +531,14 @@
 		value = audio_get_intf_req(f, ctrl);
 		break;
 
+	case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+		value = audio_set_endpoint_req(f, ctrl);
+		break;
+
+	case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+		value = audio_get_endpoint_req(f, ctrl);
+		break;
+
 	default:
 		ERROR(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
 			ctrl->bRequestType, ctrl->bRequest,
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
new file mode 100644
index 0000000..a37640e
--- /dev/null
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -0,0 +1,3091 @@
+/*
+ * f_mass_storage.c -- Mass Storage USB Composite Function
+ *
+ * Copyright (C) 2003-2008 Alan Stern
+ * Copyright (C) 2009 Samsung Electronics
+ *                    Author: Michal Nazarewicz <m.nazarewicz@samsung.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/*
+ * The Mass Storage Function acts as a USB Mass Storage device,
+ * appearing to the host as a disk drive or as a CD-ROM drive.  In
+ * addition to providing an example of a genuinely useful composite
+ * function for a USB device, it also illustrates a technique of
+ * double-buffering for increased throughput.
+ *
+ * The function supports multiple logical units (LUNs).  Backing storage
+ * for each LUN is provided by a regular file or a block device.
+ * Access for each LUN can be limited to read-only.  Moreover, the
+ * function can indicate that a LUN is removable and/or a CD-ROM.  (The
+ * latter implies read-only access.)
+ *
+ * The MSF is configured by specifying an fsg_config structure.  It has
+ * the following fields:
+ *
+ *	nluns		Number of LUNs the function has (anywhere from 1
+ *				to FSG_MAX_LUNS, which is 8).
+ *	luns		An array of LUN configuration values.  This
+ *				should be filled in for each LUN that the
+ *				function will include (i.e. for "nluns"
+ *				LUNs).  Each element of the array has
+ *				the following fields:
+ *	->filename	The path to the backing file for the LUN.
+ *				Required if the LUN is not marked as
+ *				removable.
+ *	->ro		Flag specifying that access to the LUN shall be
+ *				read-only.  This is implied if CD-ROM
+ *				emulation is enabled, as well as when
+ *				it was impossible to open "filename"
+ *				in R/W mode.
+ *	->removable	Flag specifying that the LUN shall be indicated
+ *				as being removable.
+ *	->cdrom		Flag specifying that the LUN shall be reported
+ *				as being a CD-ROM.
+ *
+ *	lun_name_format	A printf-like format for the names of the LUN
+ *				devices.  This determines how the
+ *				directory in sysfs will be named.
+ *				Unless you are using several MSFs in
+ *				a single gadget (as opposed to a single
+ *				MSF in many configurations) you may
+ *				leave it as NULL (in which case
+ *				"lun%d" will be used).  In the format
+ *				you can use "%d" to index LUNs for
+ *				MSFs with more than one LUN.  (Beware
+ *				that there is only one integer given
+ *				as an argument for the format and that
+ *				specifying an invalid format may cause
+ *				unspecified behaviour.)
+ *	thread_name	Name of the kernel thread used by the MSF.
+ *				You can safely set it to NULL (in
+ *				which case the default "file-storage"
+ *				will be used).
+ *
+ *	vendor_name
+ *	product_name
+ *	release		Information used as a reply to the INQUIRY
+ *				request.  To use the defaults, set these
+ *				to NULL, NULL and 0xffff respectively.
+ *				The first field should be 8 characters
+ *				or less, the second 16 or less.
+ *
+ *	can_stall	Set to permit the function to halt bulk
+ *				endpoints.  Disabled on some USB devices
+ *				known not to work correctly.  You should
+ *				set it to true.
+ *
+ * If "removable" is not set for a LUN then a backing file must be
+ * specified.  If it is set, then a NULL filename means the LUN's medium
+ * is not loaded (an empty string as "filename" in the fsg_config
+ * structure causes an error).  The CD-ROM emulation includes a single
+ * data track and no audio tracks; hence there need be only one
+ * backing file per LUN.  Note also that the CD-ROM block length is
+ * set to 512 rather than the more common value 2048.
+ *
+ *
+ * The MSF includes support for module parameters.  If the gadget using
+ * it decides to take advantage of them, the following module
+ * parameters will be available:
+ *
+ *	file=filename[,filename...]
+ *			Names of the files or block devices used for
+ *				backing storage.
+ *	ro=b[,b...]	Default false, boolean for read-only access.
+ *	removable=b[,b...]
+ *			Default true, boolean for removable media.
+ *	cdrom=b[,b...]	Default false, boolean for whether to emulate
+ *				a CD-ROM drive.
+ *	luns=N		Default N = number of filenames, number of
+ *				LUNs to support.
+ *	stall		Default determined according to the type of
+ *				USB device controller (usually true),
+ *				boolean to permit the driver to halt
+ *				bulk endpoints.
+ *
+ * The module parameters may be prefixed with some string.  You need
+ * to consult the gadget's documentation or source to verify whether it
+ * uses those module parameters and, if it does, what the prefix is
+ * (look for FSG_MODULE_PARAMETERS() macro usage; what's inside it is
+ * the prefix).
+ *
+ *
+ * Requirements are modest; only a bulk-in and a bulk-out endpoint are
+ * needed.  The memory requirement amounts to two 16K buffers, size
+ * configurable by a parameter.  Support is included for both
+ * full-speed and high-speed operation.
+ *
+ * Note that the driver is slightly non-portable in that it assumes a
+ * single memory/DMA buffer will be usable for bulk-in, bulk-out, and
+ * interrupt-in endpoints.  With most device controllers this isn't an
+ * issue, but there may be some with hardware restrictions that prevent
+ * a buffer from being used by more than one endpoint.
+ *
+ *
+ * The pathnames of the backing files and the ro settings are
+ * available in the attribute files "file" and "ro" in the lun<n> (or,
+ * to be more precise, in a directory whose name comes from the
+ * "lun_name_format" option) subdirectory of the gadget's sysfs
+ * directory.  If the "removable" option is set, writing to these
+ * files will simulate ejecting/loading the medium (writing an empty
+ * line means eject) and adjusting a write-enable tab.  Changes to the
+ * ro setting are not allowed when the medium is loaded or if CD-ROM
+ * emulation is being used.
+ *
+ *
+ * This function is heavily based on "File-backed Storage Gadget" by
+ * Alan Stern which in turn is heavily based on "Gadget Zero" by David
+ * Brownell.  The driver's SCSI command interface was based on the
+ * "Information technology - Small Computer System Interface - 2"
+ * document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93,
+ * available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.
+ * The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which
+ * was based on the "Universal Serial Bus Mass Storage Class UFI
+ * Command Specification" document, Revision 1.0, December 14, 1998,
+ * available at
+ * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
+ */
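
To make the configuration fields described above concrete, here is a hedged
sketch of a gadget filling in an fsg_config; every value is an example
only, and the structure itself is defined later in this file:

static struct fsg_config example_cfg = {
	.nluns = 1,
	.luns[0] = {
		.filename  = "/root/backing.img",	/* example path */
		.ro        = 0,
		.removable = 1,
		.cdrom     = 0,
	},
	.lun_name_format = NULL,	/* default: "lun%d" */
	.thread_name     = NULL,	/* default: "file-storage" */
	.vendor_name     = "Example",		/*  8 chars or less */
	.product_name    = "Sketch Storage",	/* 16 chars or less */
	.release         = 0xffff,		/* use the default */
	.can_stall       = 1,
};
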
+
+
+/*
+ *				Driver Design
+ *
+ * The MSF is fairly straightforward.  There is a main kernel
+ * thread that handles most of the work.  Interrupt routines field
+ * callbacks from the controller driver: bulk- and interrupt-request
+ * completion notifications, endpoint-0 events, and disconnect events.
+ * Completion events are passed to the main thread by wakeup calls.  Many
+ * ep0 requests are handled at interrupt time, but SetInterface,
+ * SetConfiguration, and device reset requests are forwarded to the
+ * thread in the form of "exceptions" using SIGUSR1 signals (since they
+ * should interrupt any ongoing file I/O operations).
+ *
+ * The thread's main routine implements the standard command/data/status
+ * parts of a SCSI interaction.  It and its subroutines are full of tests
+ * for pending signals/exceptions -- all this polling is necessary since
+ * the kernel has no setjmp/longjmp equivalents.  (Maybe this is an
+ * indication that the driver really wants to be running in userspace.)
+ * An important point is that so long as the thread is alive it keeps an
+ * open reference to the backing file.  This will prevent unmounting
+ * the backing file's underlying filesystem and could cause problems
+ * during system shutdown, for example.  To prevent such problems, the
+ * thread catches INT, TERM, and KILL signals and converts them into
+ * an EXIT exception.
+ *
+ * In normal operation the main thread is started during the gadget's
+ * fsg_bind() callback and stopped during fsg_unbind().  But it can
+ * also exit when it receives a signal, and there's no point leaving
+ * the gadget running when the thread is dead.  As of this moment, the
+ * MSF provides no way to deregister the gadget when the thread dies --
+ * maybe a callback function is needed.
+ *
+ * To provide maximum throughput, the driver uses a circular pipeline of
+ * buffer heads (struct fsg_buffhd).  In principle the pipeline can be
+ * arbitrarily long; in practice the benefits don't justify having more
+ * than 2 stages (i.e., double buffering).  But it helps to think of the
+ * pipeline as being a long one.  Each buffer head contains a bulk-in and
+ * a bulk-out request pointer (since the buffer can be used for both
+ * output and input -- directions always are given from the host's
+ * point of view) as well as a pointer to the buffer and various state
+ * variables.
+ *
+ * Use of the pipeline follows a simple protocol.  There is a variable
+ * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
+ * At any time that buffer head may still be in use from an earlier
+ * request, so each buffer head has a state variable indicating whether
+ * it is EMPTY, FULL, or BUSY.  Typical use involves waiting for the
+ * buffer head to be EMPTY, filling the buffer either by file I/O or by
+ * USB I/O (during which the buffer head is BUSY), and marking the buffer
+ * head FULL when the I/O is complete.  Then the buffer will be emptied
+ * (again possibly by USB I/O, during which it is marked BUSY) and
+ * finally marked EMPTY again (possibly by a completion routine).
+ *
+ * A module parameter tells the driver to avoid stalling the bulk
+ * endpoints wherever the transport specification allows.  This is
+ * necessary for some UDCs like the SuperH, which cannot reliably clear a
+ * halt on a bulk endpoint.  However, under certain circumstances the
+ * Bulk-only specification requires a stall.  In such cases the driver
+ * will halt the endpoint and set a flag indicating that it should clear
+ * the halt in software during the next device reset.  Hopefully this
+ * will permit everything to work correctly.  Furthermore, although the
+ * specification allows the bulk-out endpoint to halt when the host sends
+ * too much data, implementing this would cause an unavoidable race.
+ * The driver will always use the "no-stall" approach for OUT transfers.
+ *
+ * One subtle point concerns sending status-stage responses for ep0
+ * requests.  Some of these requests, such as device reset, can involve
+ * interrupting an ongoing file I/O operation, which might take an
+ * arbitrarily long time.  During that delay the host might give up on
+ * the original ep0 request and issue a new one.  When that happens the
+ * driver should not notify the host about completion of the original
+ * request, as the host will no longer be waiting for it.  So the driver
+ * assigns to each ep0 request a unique tag, and it keeps track of the
+ * tag value of the request associated with a long-running exception
+ * (device-reset, interface-change, or configuration-change).  When the
+ * exception handler is finished, the status-stage response is submitted
+ * only if the current ep0 request tag is equal to the exception request
+ * tag.  Thus only the most recently received ep0 request will get a
+ * status-stage response.
+ *
+ * Warning: This driver source file is too long.  It ought to be split up
+ * into a header file plus about 3 separate .c files, to handle the details
+ * of the Gadget, USB Mass Storage, and SCSI protocols.
+ */
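
The circular pipeline described above fits in a few lines.  This standalone
sketch is deliberately simplified -- one thread, the actual I/O elided --
but it walks a two-stage pipeline through the EMPTY -> BUSY -> FULL ->
EMPTY cycle:

#include <stdio.h>

enum buf_state { BUF_EMPTY, BUF_FULL, BUF_BUSY };

struct buffhd {
	enum buf_state state;
	struct buffhd *next;
};

int main(void)
{
	struct buffhd bh[2] = {
		{ BUF_EMPTY, &bh[1] },
		{ BUF_EMPTY, &bh[0] },	/* circular, two stages deep */
	};
	struct buffhd *fill = &bh[0], *drain = &bh[0];
	int i;

	for (i = 0; i < 4; i++) {
		/* producer: wait for EMPTY, mark BUSY while filling,
		 * then mark FULL */
		if (fill->state == BUF_EMPTY) {
			fill->state = BUF_BUSY;	/* file or USB I/O here */
			fill->state = BUF_FULL;
			fill = fill->next;
		}
		/* consumer: wait for FULL, mark BUSY while draining,
		 * then mark EMPTY */
		if (drain->state == BUF_FULL) {
			drain->state = BUF_BUSY;
			drain->state = BUF_EMPTY;
			drain = drain->next;
		}
	}
	puts("pipeline cycled");
	return 0;
}
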
+
+
+/* #define VERBOSE_DEBUG */
+/* #define DUMP_MSGS */
+
+
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/dcache.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fcntl.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kref.h>
+#include <linux/kthread.h>
+#include <linux/limits.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/freezer.h>
+#include <linux/utsname.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include "gadget_chips.h"
+
+
+
+/*------------------------------------------------------------------------*/
+
+#define FSG_DRIVER_DESC		"Mass Storage Function"
+#define FSG_DRIVER_VERSION	"2009/09/11"
+
+static const char fsg_string_interface[] = "Mass Storage";
+
+
+#define FSG_NO_INTR_EP           1
+#define FSG_BUFFHD_STATIC_BUFFER 1
+#define FSG_NO_DEVICE_STRINGS    1
+#define FSG_NO_OTG               1
+
+#include "storage_common.c"
+
+
+/*-------------------------------------------------------------------------*/
+
+struct fsg_dev;
+
+
+/* Data shared by all the FSG instances. */
+struct fsg_common {
+	struct usb_gadget	*gadget;
+	struct fsg_dev		*fsg;
+	struct fsg_dev		*prev_fsg;
+
+	/* filesem protects: backing files in use */
+	struct rw_semaphore	filesem;
+
+	/* lock protects: state, all the req_busy's */
+	spinlock_t		lock;
+
+	struct usb_ep		*ep0;		/* Copy of gadget->ep0 */
+	struct usb_request	*ep0req;	/* Copy of cdev->req */
+	unsigned int		ep0_req_tag;
+	const char		*ep0req_name;
+
+	struct fsg_buffhd	*next_buffhd_to_fill;
+	struct fsg_buffhd	*next_buffhd_to_drain;
+	struct fsg_buffhd	buffhds[FSG_NUM_BUFFERS];
+
+	int			cmnd_size;
+	u8			cmnd[MAX_COMMAND_SIZE];
+
+	unsigned int		nluns;
+	unsigned int		lun;
+	struct fsg_lun		*luns;
+	struct fsg_lun		*curlun;
+
+	unsigned int		bulk_out_maxpacket;
+	enum fsg_state		state;		/* For exception handling */
+	unsigned int		exception_req_tag;
+
+	u8			config, new_config;
+	enum data_direction	data_dir;
+	u32			data_size;
+	u32			data_size_from_cmnd;
+	u32			tag;
+	u32			residue;
+	u32			usb_amount_left;
+
+	unsigned int		can_stall:1;
+	unsigned int		free_storage_on_release:1;
+	unsigned int		phase_error:1;
+	unsigned int		short_packet_received:1;
+	unsigned int		bad_lun_okay:1;
+	unsigned int		running:1;
+
+	int			thread_wakeup_needed;
+	struct completion	thread_notifier;
+	struct task_struct	*thread_task;
+
+	/* Callback function to call when thread exits. */
+	void			(*thread_exits)(struct fsg_common *common);
+	/* Gadget's private data. */
+	void			*private_data;
+
+	/* Vendor (8 chars), product (16 chars), release (4
+	 * hexadecimal digits) and NUL byte */
+	char inquiry_string[8 + 16 + 4 + 1];
+
+	struct kref		ref;
+};
+
+
+struct fsg_config {
+	unsigned nluns;
+	struct fsg_lun_config {
+		const char *filename;
+		char ro;
+		char removable;
+		char cdrom;
+	} luns[FSG_MAX_LUNS];
+
+	const char		*lun_name_format;
+	const char		*thread_name;
+
+	/* Callback function to call when thread exits. */
+	void			(*thread_exits)(struct fsg_common *common);
+	/* Gadget's private data. */
+	void			*private_data;
+
+	const char *vendor_name;		/*  8 characters or less */
+	const char *product_name;		/* 16 characters or less */
+	u16 release;
+
+	char			can_stall;
+};
+
+
+struct fsg_dev {
+	struct usb_function	function;
+	struct usb_gadget	*gadget;	/* Copy of cdev->gadget */
+	struct fsg_common	*common;
+
+	u16			interface_number;
+
+	unsigned int		bulk_in_enabled:1;
+	unsigned int		bulk_out_enabled:1;
+
+	unsigned long		atomic_bitflags;
+#define IGNORE_BULK_OUT		0
+
+	struct usb_ep		*bulk_in;
+	struct usb_ep		*bulk_out;
+};
+
+
+static inline int __fsg_is_set(struct fsg_common *common,
+			       const char *func, unsigned line)
+{
+	if (common->fsg)
+		return 1;
+	ERROR(common, "common->fsg is NULL in %s at %u\n", func, line);
+	return 0;
+}
+
+#define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__))
+
+
+static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
+{
+	return container_of(f, struct fsg_dev, function);
+}
+
+
+typedef void (*fsg_routine_t)(struct fsg_dev *);
+
+static int exception_in_progress(struct fsg_common *common)
+{
+	return common->state > FSG_STATE_IDLE;
+}
+
+/* Make bulk-out requests be divisible by the maxpacket size */
+static void set_bulk_out_req_length(struct fsg_common *common,
+		struct fsg_buffhd *bh, unsigned int length)
+{
+	unsigned int	rem;
+
+	bh->bulk_out_intended_length = length;
+	rem = length % common->bulk_out_maxpacket;
+	if (rem > 0)
+		length += common->bulk_out_maxpacket - rem;
+	bh->outreq->length = length;
+}
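
A standalone sketch of the rounding rule set_bulk_out_req_length() applies
above -- bulk-out request lengths are padded up to a multiple of the
endpoint's maxpacket size (the values below are examples):

#include <stdio.h>

static unsigned int pad_to_maxpacket(unsigned int length,
				     unsigned int maxpacket)
{
	unsigned int rem = length % maxpacket;

	return rem ? length + (maxpacket - rem) : length;
}

int main(void)
{
	/* 600 bytes on a 512-byte bulk endpoint -> 1024 */
	printf("%u\n", pad_to_maxpacket(600, 512));
	return 0;
}
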
+
+/*-------------------------------------------------------------------------*/
+
+static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
+{
+	const char	*name;
+
+	if (ep == fsg->bulk_in)
+		name = "bulk-in";
+	else if (ep == fsg->bulk_out)
+		name = "bulk-out";
+	else
+		name = ep->name;
+	DBG(fsg, "%s set halt\n", name);
+	return usb_ep_set_halt(ep);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* These routines may be called in process context or in_irq */
+
+/* Caller must hold fsg->lock */
+static void wakeup_thread(struct fsg_common *common)
+{
+	/* Tell the main thread that something has happened */
+	common->thread_wakeup_needed = 1;
+	if (common->thread_task)
+		wake_up_process(common->thread_task);
+}
+
+
+static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
+{
+	unsigned long		flags;
+
+	/* Do nothing if a higher-priority exception is already in progress.
+	 * If a lower-or-equal priority exception is in progress, preempt it
+	 * and notify the main thread by sending it a signal. */
+	spin_lock_irqsave(&common->lock, flags);
+	if (common->state <= new_state) {
+		common->exception_req_tag = common->ep0_req_tag;
+		common->state = new_state;
+		if (common->thread_task)
+			send_sig_info(SIGUSR1, SEND_SIG_FORCED,
+				      common->thread_task);
+	}
+	spin_unlock_irqrestore(&common->lock, flags);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int ep0_queue(struct fsg_common *common)
+{
+	int	rc;
+
+	rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC);
+	common->ep0->driver_data = common;
+	if (rc != 0 && rc != -ESHUTDOWN) {
+		/* We can't do much more than wait for a reset */
+		WARNING(common, "error in submission: %s --> %d\n",
+			common->ep0->name, rc);
+	}
+	return rc;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Bulk and interrupt endpoint completion handlers.
+ * These always run in_irq. */
+
+static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct fsg_common	*common = ep->driver_data;
+	struct fsg_buffhd	*bh = req->context;
+
+	if (req->status || req->actual != req->length)
+		DBG(common, "%s --> %d, %u/%u\n", __func__,
+				req->status, req->actual, req->length);
+	if (req->status == -ECONNRESET)		/* Request was cancelled */
+		usb_ep_fifo_flush(ep);
+
+	/* Hold the lock while we update the request and buffer states */
+	smp_wmb();
+	spin_lock(&common->lock);
+	bh->inreq_busy = 0;
+	bh->state = BUF_STATE_EMPTY;
+	wakeup_thread(common);
+	spin_unlock(&common->lock);
+}
+
+static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct fsg_common	*common = ep->driver_data;
+	struct fsg_buffhd	*bh = req->context;
+
+	dump_msg(common, "bulk-out", req->buf, req->actual);
+	if (req->status || req->actual != bh->bulk_out_intended_length)
+		DBG(common, "%s --> %d, %u/%u\n", __func__,
+				req->status, req->actual,
+				bh->bulk_out_intended_length);
+	if (req->status == -ECONNRESET)		/* Request was cancelled */
+		usb_ep_fifo_flush(ep);
+
+	/* Hold the lock while we update the request and buffer states */
+	smp_wmb();
+	spin_lock(&common->lock);
+	bh->outreq_busy = 0;
+	bh->state = BUF_STATE_FULL;
+	wakeup_thread(common);
+	spin_unlock(&common->lock);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Ep0 class-specific handlers.  These always run in_irq. */
+
+static int fsg_setup(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct fsg_dev		*fsg = fsg_from_func(f);
+	struct usb_request	*req = fsg->common->ep0req;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+
+	if (!fsg->common->config)
+		return -EOPNOTSUPP;
+
+	switch (ctrl->bRequest) {
+
+	case USB_BULK_RESET_REQUEST:
+		if (ctrl->bRequestType !=
+		    (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
+			break;
+		if (w_index != fsg->interface_number || w_value != 0)
+			return -EDOM;
+
+		/* Raise an exception to stop the current operation
+		 * and reinitialize our state. */
+		DBG(fsg, "bulk reset request\n");
+		raise_exception(fsg->common, FSG_STATE_RESET);
+		return DELAYED_STATUS;
+
+	case USB_BULK_GET_MAX_LUN_REQUEST:
+		if (ctrl->bRequestType !=
+		    (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
+			break;
+		if (w_index != fsg->interface_number || w_value != 0)
+			return -EDOM;
+		VDBG(fsg, "get max LUN\n");
+		*(u8 *) req->buf = fsg->common->nluns - 1;
+		return 1;
+	}
+
+	VDBG(fsg,
+	     "unknown class-specific control req "
+	     "%02x.%02x v%04x i%04x l%u\n",
+	     ctrl->bRequestType, ctrl->bRequest,
+	     le16_to_cpu(ctrl->wValue), w_index, w_length);
+	return -EOPNOTSUPP;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* All the following routines run in process context */
+
+
+/* Use this for bulk or interrupt transfers, not ep0 */
+static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
+		struct usb_request *req, int *pbusy,
+		enum fsg_buffer_state *state)
+{
+	int	rc;
+
+	if (ep == fsg->bulk_in)
+		dump_msg(fsg, "bulk-in", req->buf, req->length);
+
+	spin_lock_irq(&fsg->common->lock);
+	*pbusy = 1;
+	*state = BUF_STATE_BUSY;
+	spin_unlock_irq(&fsg->common->lock);
+	rc = usb_ep_queue(ep, req, GFP_KERNEL);
+	if (rc != 0) {
+		*pbusy = 0;
+		*state = BUF_STATE_EMPTY;
+
+		/* We can't do much more than wait for a reset */
+
+		/* Note: currently the net2280 driver fails zero-length
+		 * submissions if DMA is enabled. */
+		if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
+						req->length == 0))
+			WARNING(fsg, "error in submission: %s --> %d\n",
+					ep->name, rc);
+	}
+}
+
+#define START_TRANSFER_OR(common, ep_name, req, pbusy, state)		\
+	if (fsg_is_set(common))						\
+		start_transfer((common)->fsg, (common)->fsg->ep_name,	\
+			       req, pbusy, state);			\
+	else
+
+#define START_TRANSFER(common, ep_name, req, pbusy, state)		\
+	START_TRANSFER_OR(common, ep_name, req, pbusy, state) (void)0
+
+
+
+static int sleep_thread(struct fsg_common *common)
+{
+	int	rc = 0;
+
+	/* Wait until a signal arrives or we are woken up */
+	for (;;) {
+		try_to_freeze();
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (signal_pending(current)) {
+			rc = -EINTR;
+			break;
+		}
+		if (common->thread_wakeup_needed)
+			break;
+		schedule();
+	}
+	__set_current_state(TASK_RUNNING);
+	common->thread_wakeup_needed = 0;
+	return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int do_read(struct fsg_common *common)
+{
+	struct fsg_lun		*curlun = common->curlun;
+	u32			lba;
+	struct fsg_buffhd	*bh;
+	int			rc;
+	u32			amount_left;
+	loff_t			file_offset, file_offset_tmp;
+	unsigned int		amount;
+	unsigned int		partial_page;
+	ssize_t			nread;
+
+	/* Get the starting Logical Block Address and check that it's
+	 * not too big */
+	if (common->cmnd[0] == SC_READ_6)
+		lba = get_unaligned_be24(&common->cmnd[1]);
+	else {
+		lba = get_unaligned_be32(&common->cmnd[2]);
+
+		/* We allow DPO (Disable Page Out = don't save data in the
+		 * cache) and FUA (Force Unit Access = don't read from the
+		 * cache), but we don't implement them. */
+		if ((common->cmnd[1] & ~0x18) != 0) {
+			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+			return -EINVAL;
+		}
+	}
+	if (lba >= curlun->num_sectors) {
+		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+		return -EINVAL;
+	}
+	file_offset = ((loff_t) lba) << 9;
+
+	/* Carry out the file reads */
+	amount_left = common->data_size_from_cmnd;
+	if (unlikely(amount_left == 0))
+		return -EIO;		/* No default reply */
+
+	for (;;) {
+
+		/* Figure out how much we need to read:
+		 * Try to read the remaining amount.
+		 * But don't read more than the buffer size.
+		 * And don't try to read past the end of the file.
+		 * Finally, if we're not at a page boundary, don't read past
+		 *	the next page.
+		 * If this means reading 0 then we were asked to read past
+		 *	the end of file. */
+		amount = min(amount_left, FSG_BUFLEN);
+		amount = min((loff_t) amount,
+				curlun->file_length - file_offset);
+		partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
+		if (partial_page > 0)
+			amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
+					partial_page);
+
+		/* Wait for the next buffer to become available */
+		bh = common->next_buffhd_to_fill;
+		while (bh->state != BUF_STATE_EMPTY) {
+			rc = sleep_thread(common);
+			if (rc)
+				return rc;
+		}
+
+		/* If we were asked to read past the end of file,
+		 * end with an empty buffer. */
+		if (amount == 0) {
+			curlun->sense_data =
+					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+			curlun->sense_data_info = file_offset >> 9;
+			curlun->info_valid = 1;
+			bh->inreq->length = 0;
+			bh->state = BUF_STATE_FULL;
+			break;
+		}
+
+		/* Perform the read */
+		file_offset_tmp = file_offset;
+		nread = vfs_read(curlun->filp,
+				(char __user *) bh->buf,
+				amount, &file_offset_tmp);
+		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
+				(unsigned long long) file_offset,
+				(int) nread);
+		if (signal_pending(current))
+			return -EINTR;
+
+		if (nread < 0) {
+			LDBG(curlun, "error in file read: %d\n",
+					(int) nread);
+			nread = 0;
+		} else if (nread < amount) {
+			LDBG(curlun, "partial file read: %d/%u\n",
+					(int) nread, amount);
+			nread -= (nread & 511);	/* Round down to a block */
+		}
+		file_offset  += nread;
+		amount_left  -= nread;
+		common->residue -= nread;
+		bh->inreq->length = nread;
+		bh->state = BUF_STATE_FULL;
+
+		/* If an error occurred, report it and its position */
+		if (nread < amount) {
+			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
+			curlun->sense_data_info = file_offset >> 9;
+			curlun->info_valid = 1;
+			break;
+		}
+
+		if (amount_left == 0)
+			break;		/* No more left to read */
+
+		/* Send this buffer and go read some more */
+		bh->inreq->zero = 0;
+		START_TRANSFER_OR(common, bulk_in, bh->inreq,
+			       &bh->inreq_busy, &bh->state)
+			/* Don't know what to do if
+			 * common->fsg is NULL */
+			return -EIO;
+		common->next_buffhd_to_fill = bh->next;
+	}
+
+	return -EIO;		/* No default reply */
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int do_write(struct fsg_common *common)
+{
+	struct fsg_lun		*curlun = common->curlun;
+	u32			lba;
+	struct fsg_buffhd	*bh;
+	int			get_some_more;
+	u32			amount_left_to_req, amount_left_to_write;
+	loff_t			usb_offset, file_offset, file_offset_tmp;
+	unsigned int		amount;
+	unsigned int		partial_page;
+	ssize_t			nwritten;
+	int			rc;
+
+	if (curlun->ro) {
+		curlun->sense_data = SS_WRITE_PROTECTED;
+		return -EINVAL;
+	}
+	spin_lock(&curlun->filp->f_lock);
+	curlun->filp->f_flags &= ~O_SYNC;	/* Default is not to wait */
+	spin_unlock(&curlun->filp->f_lock);
+
+	/* Get the starting Logical Block Address and check that it's
+	 * not too big */
+	if (common->cmnd[0] == SC_WRITE_6)
+		lba = get_unaligned_be24(&common->cmnd[1]);
+	else {
+		lba = get_unaligned_be32(&common->cmnd[2]);
+
+		/* We allow DPO (Disable Page Out = don't save data in the
+		 * cache) and FUA (Force Unit Access = write directly to the
+		 * medium).  We don't implement DPO; we implement FUA by
+		 * performing synchronous output. */
+		if (common->cmnd[1] & ~0x18) {
+			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+			return -EINVAL;
+		}
+		if (common->cmnd[1] & 0x08) {	/* FUA */
+			spin_lock(&curlun->filp->f_lock);
+			curlun->filp->f_flags |= O_SYNC;
+			spin_unlock(&curlun->filp->f_lock);
+		}
+	}
+	if (lba >= curlun->num_sectors) {
+		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+		return -EINVAL;
+	}
+
+	/* Carry out the file writes */
+	get_some_more = 1;
+	file_offset = usb_offset = ((loff_t) lba) << 9;
+	amount_left_to_req = common->data_size_from_cmnd;
+	amount_left_to_write = common->data_size_from_cmnd;
+
+	while (amount_left_to_write > 0) {
+
+		/* Queue a request for more data from the host */
+		bh = common->next_buffhd_to_fill;
+		if (bh->state == BUF_STATE_EMPTY && get_some_more) {
+
+			/* Figure out how much we want to get:
+			 * Try to get the remaining amount.
+			 * But don't get more than the buffer size.
+			 * And don't try to go past the end of the file.
+			 * If we're not at a page boundary,
+			 *	don't go past the next page.
+			 * If this means getting 0, then we were asked
+			 *	to write past the end of file.
+			 * Finally, round down to a block boundary. */
+			amount = min(amount_left_to_req, FSG_BUFLEN);
+			amount = min((loff_t) amount, curlun->file_length -
+					usb_offset);
+			partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
+			if (partial_page > 0)
+				amount = min(amount,
+	(unsigned int) PAGE_CACHE_SIZE - partial_page);
+
+			if (amount == 0) {
+				get_some_more = 0;
+				curlun->sense_data =
+					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+				curlun->sense_data_info = usb_offset >> 9;
+				curlun->info_valid = 1;
+				continue;
+			}
+			amount -= (amount & 511);
+			if (amount == 0) {
+
+				/* Why were we asked to transfer a
+				 * partial block? */
+				get_some_more = 0;
+				continue;
+			}
+
+			/* Get the next buffer */
+			usb_offset += amount;
+			common->usb_amount_left -= amount;
+			amount_left_to_req -= amount;
+			if (amount_left_to_req == 0)
+				get_some_more = 0;
+
+			/* amount is always divisible by 512, hence by
+			 * the bulk-out maxpacket size */
+			bh->outreq->length = amount;
+			bh->bulk_out_intended_length = amount;
+			bh->outreq->short_not_ok = 1;
+			START_TRANSFER_OR(common, bulk_out, bh->outreq,
+					  &bh->outreq_busy, &bh->state)
+				/* Don't know what to do if
+				 * common->fsg is NULL */
+				return -EIO;
+			common->next_buffhd_to_fill = bh->next;
+			continue;
+		}
+
+		/* Write the received data to the backing file */
+		bh = common->next_buffhd_to_drain;
+		if (bh->state == BUF_STATE_EMPTY && !get_some_more)
+			break;			/* We stopped early */
+		if (bh->state == BUF_STATE_FULL) {
+			smp_rmb();
+			common->next_buffhd_to_drain = bh->next;
+			bh->state = BUF_STATE_EMPTY;
+
+			/* Did something go wrong with the transfer? */
+			if (bh->outreq->status != 0) {
+				curlun->sense_data = SS_COMMUNICATION_FAILURE;
+				curlun->sense_data_info = file_offset >> 9;
+				curlun->info_valid = 1;
+				break;
+			}
+
+			amount = bh->outreq->actual;
+			if (curlun->file_length - file_offset < amount) {
+				LERROR(curlun,
+	"write %u @ %llu beyond end %llu\n",
+	amount, (unsigned long long) file_offset,
+	(unsigned long long) curlun->file_length);
+				amount = curlun->file_length - file_offset;
+			}
+
+			/* Perform the write */
+			file_offset_tmp = file_offset;
+			nwritten = vfs_write(curlun->filp,
+					(char __user *) bh->buf,
+					amount, &file_offset_tmp);
+			VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
+					(unsigned long long) file_offset,
+					(int) nwritten);
+			if (signal_pending(current))
+				return -EINTR;		/* Interrupted! */
+
+			if (nwritten < 0) {
+				LDBG(curlun, "error in file write: %d\n",
+						(int) nwritten);
+				nwritten = 0;
+			} else if (nwritten < amount) {
+				LDBG(curlun, "partial file write: %d/%u\n",
+						(int) nwritten, amount);
+				nwritten -= (nwritten & 511);
+				/* Round down to a block */
+			}
+			file_offset += nwritten;
+			amount_left_to_write -= nwritten;
+			common->residue -= nwritten;
+
+			/* If an error occurred, report it and its position */
+			if (nwritten < amount) {
+				curlun->sense_data = SS_WRITE_ERROR;
+				curlun->sense_data_info = file_offset >> 9;
+				curlun->info_valid = 1;
+				break;
+			}
+
+			/* Did the host decide to stop early? */
+			if (bh->outreq->actual != bh->outreq->length) {
+				common->short_packet_received = 1;
+				break;
+			}
+			continue;
+		}
+
+		/* Wait for something to happen */
+		rc = sleep_thread(common);
+		if (rc)
+			return rc;
+	}
+
+	return -EIO;		/* No default reply */
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int do_synchronize_cache(struct fsg_common *common)
+{
+	struct fsg_lun	*curlun = common->curlun;
+	int		rc;
+
+	/* We ignore the requested LBA and write out all of the file's
+	 * dirty data buffers. */
+	rc = fsg_lun_fsync_sub(curlun);
+	if (rc)
+		curlun->sense_data = SS_WRITE_ERROR;
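+	/* Return 0 (no data to send); any flush failure reaches the
+	 * host through the sense data set above. */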
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static void invalidate_sub(struct fsg_lun *curlun)
+{
+	struct file	*filp = curlun->filp;
+	struct inode	*inode = filp->f_path.dentry->d_inode;
+	unsigned long	rc;
+
+	rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
+	VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
+}
+
+static int do_verify(struct fsg_common *common)
+{
+	struct fsg_lun		*curlun = common->curlun;
+	u32			lba;
+	u32			verification_length;
+	struct fsg_buffhd	*bh = common->next_buffhd_to_fill;
+	loff_t			file_offset, file_offset_tmp;
+	u32			amount_left;
+	unsigned int		amount;
+	ssize_t			nread;
+
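+	/* VERIFY with BytChk == 0: read the requested blocks back from
+	 * the medium and report any I/O errors; the data itself is not
+	 * compared against anything. */
+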
+	/* Get the starting Logical Block Address and check that it's
+	 * not too big */
+	lba = get_unaligned_be32(&common->cmnd[2]);
+	if (lba >= curlun->num_sectors) {
+		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+		return -EINVAL;
+	}
+
+	/* We allow DPO (Disable Page Out = don't save data in the
+	 * cache) but we don't implement it. */
+	if (common->cmnd[1] & ~0x10) {
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	verification_length = get_unaligned_be16(&common->cmnd[7]);
+	if (unlikely(verification_length == 0))
+		return -EIO;		/* No default reply */
+
+	/* Prepare to carry out the file verify */
+	amount_left = verification_length << 9;
+	file_offset = ((loff_t) lba) << 9;
+
+	/* Write out all the dirty buffers before invalidating them */
+	fsg_lun_fsync_sub(curlun);
+	if (signal_pending(current))
+		return -EINTR;
+
+	invalidate_sub(curlun);
+	if (signal_pending(current))
+		return -EINTR;
+
+	/* Just try to read the requested blocks */
+	while (amount_left > 0) {
+
+		/* Figure out how much we need to read:
+		 * Try to read the remaining amount, but not more than
+		 * the buffer size.
+		 * And don't try to read past the end of the file.
+		 * If this means reading 0 then we were asked to read
+		 * past the end of file. */
+		amount = min(amount_left, FSG_BUFLEN);
+		amount = min((loff_t) amount,
+				curlun->file_length - file_offset);
+		if (amount == 0) {
+			curlun->sense_data =
+					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+			curlun->sense_data_info = file_offset >> 9;
+			curlun->info_valid = 1;
+			break;
+		}
+
+		/* Perform the read */
+		file_offset_tmp = file_offset;
+		nread = vfs_read(curlun->filp,
+				(char __user *) bh->buf,
+				amount, &file_offset_tmp);
+		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
+				(unsigned long long) file_offset,
+				(int) nread);
+		if (signal_pending(current))
+			return -EINTR;
+
+		if (nread < 0) {
+			LDBG(curlun, "error in file verify: %d\n",
+					(int) nread);
+			nread = 0;
+		} else if (nread < amount) {
+			LDBG(curlun, "partial file verify: %d/%u\n",
+					(int) nread, amount);
+			nread -= (nread & 511);	/* Round down to a sector */
+		}
+		if (nread == 0) {
+			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
+			curlun->sense_data_info = file_offset >> 9;
+			curlun->info_valid = 1;
+			break;
+		}
+		file_offset += nread;
+		amount_left -= nread;
+	}
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	struct fsg_lun *curlun = common->curlun;
+	u8	*buf = (u8 *) bh->buf;
+
+	if (!curlun) {		/* Unsupported LUNs are okay */
+		common->bad_lun_okay = 1;
+		memset(buf, 0, 36);
+		buf[0] = 0x7f;		/* Unsupported, no device-type */
+		buf[4] = 31;		/* Additional length */
+		return 36;
+	}
+
+	buf[0] = curlun->cdrom ? TYPE_CDROM : TYPE_DISK;
+	buf[1] = curlun->removable ? 0x80 : 0;
+	buf[2] = 2;		/* ANSI SCSI level 2 */
+	buf[3] = 2;		/* SCSI-2 INQUIRY data format */
+	buf[4] = 31;		/* Additional length */
+	buf[5] = 0;		/* No special options */
+	buf[6] = 0;
+	buf[7] = 0;
+	memcpy(buf + 8, common->inquiry_string, sizeof common->inquiry_string);
+	return 36;
+}
+
+
+static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = common->curlun;
+	u8		*buf = (u8 *) bh->buf;
+	u32		sd, sdinfo;
+	int		valid;
+
+	/*
+	 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
+	 *
+	 * If a REQUEST SENSE command is received from an initiator
+	 * with a pending unit attention condition (before the target
+	 * generates the contingent allegiance condition), then the
+	 * target shall either:
+	 *   a) report any pending sense data and preserve the unit
+	 *	attention condition on the logical unit, or,
+	 *   b) report the unit attention condition, may discard any
+	 *	pending sense data, and clear the unit attention
+	 *	condition on the logical unit for that initiator.
+	 *
+	 * FSG normally uses option a); enable this code to use option b).
+	 */
+#if 0
+	if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
+		curlun->sense_data = curlun->unit_attention_data;
+		curlun->unit_attention_data = SS_NO_SENSE;
+	}
+#endif
+
+	if (!curlun) {		/* Unsupported LUNs are okay */
+		common->bad_lun_okay = 1;
+		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
+		sdinfo = 0;
+		valid = 0;
+	} else {
+		sd = curlun->sense_data;
+		sdinfo = curlun->sense_data_info;
+		valid = curlun->info_valid << 7;
+		curlun->sense_data = SS_NO_SENSE;
+		curlun->sense_data_info = 0;
+		curlun->info_valid = 0;
+	}
+
+	memset(buf, 0, 18);
+	buf[0] = valid | 0x70;			/* Valid, current error */
+	buf[2] = SK(sd);
+	put_unaligned_be32(sdinfo, &buf[3]);	/* Sense information */
+	buf[7] = 18 - 8;			/* Additional sense length */
+	buf[12] = ASC(sd);
+	buf[13] = ASCQ(sd);
+	return 18;
+}
+
+
+static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = common->curlun;
+	u32		lba = get_unaligned_be32(&common->cmnd[2]);
+	int		pmi = common->cmnd[8];
+	u8		*buf = (u8 *) bh->buf;
+
+	/* Check the PMI and LBA fields */
+	if (pmi > 1 || (pmi == 0 && lba != 0)) {
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
+						/* Max logical block */
+	put_unaligned_be32(512, &buf[4]);	/* Block length */
+	return 8;
+}
+
+
+static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = common->curlun;
+	int		msf = common->cmnd[1] & 0x02;
+	u32		lba = get_unaligned_be32(&common->cmnd[2]);
+	u8		*buf = (u8 *) bh->buf;
+
+	if (common->cmnd[1] & ~0x02) {		/* Mask away MSF */
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+	if (lba >= curlun->num_sectors) {
+		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+		return -EINVAL;
+	}
+
+	memset(buf, 0, 8);
+	buf[0] = 0x01;		/* 2048 bytes of user data, rest is EC */
+	store_cdrom_address(&buf[4], msf, lba);
+	return 8;
+}
+
+
+static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = common->curlun;
+	int		msf = common->cmnd[1] & 0x02;
+	int		start_track = common->cmnd[6];
+	u8		*buf = (u8 *) bh->buf;
+
+	if ((common->cmnd[1] & ~0x02) != 0 ||	/* Mask away MSF */
+			start_track > 1) {
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
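+	/* Build a minimal TOC: a single data track starting at LBA 0
+	 * followed by the mandatory lead-out entry. */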
+	memset(buf, 0, 20);
+	buf[1] = (20-2);		/* TOC data length */
+	buf[2] = 1;			/* First track number */
+	buf[3] = 1;			/* Last track number */
+	buf[5] = 0x16;			/* Data track, copying allowed */
+	buf[6] = 0x01;			/* Only track is number 1 */
+	store_cdrom_address(&buf[8], msf, 0);
+
+	buf[13] = 0x16;			/* Lead-out track is data */
+	buf[14] = 0xAA;			/* Lead-out track number */
+	store_cdrom_address(&buf[16], msf, curlun->num_sectors);
+	return 20;
+}
+
+
+static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = common->curlun;
+	int		mscmnd = common->cmnd[0];
+	u8		*buf = (u8 *) bh->buf;
+	u8		*buf0 = buf;
+	int		pc, page_code;
+	int		changeable_values, all_pages;
+	int		valid_page = 0;
+	int		len, limit;
+
+	if ((common->cmnd[1] & ~0x08) != 0) {	/* Mask away DBD */
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+	pc = common->cmnd[2] >> 6;
+	page_code = common->cmnd[2] & 0x3f;
+	if (pc == 3) {
+		curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
+		return -EINVAL;
+	}
+	changeable_values = (pc == 1);
+	all_pages = (page_code == 0x3f);
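+	/* The PC field selects current (0), changeable (1), default (2)
+	 * or saved (3) values; page code 0x3f asks for all supported
+	 * pages. */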
+
+	/* Write the mode parameter header.  Fixed values are: default
+	 * medium type, no cache control (DPOFUA), and no block descriptors.
+	 * The only variable value is the WriteProtect bit.  We will fill in
+	 * the mode data length later. */
+	memset(buf, 0, 8);
+	if (mscmnd == SC_MODE_SENSE_6) {
+		buf[2] = (curlun->ro ? 0x80 : 0x00);		/* WP, DPOFUA */
+		buf += 4;
+		limit = 255;
+	} else {			/* SC_MODE_SENSE_10 */
+		buf[3] = (curlun->ro ? 0x80 : 0x00);		/* WP, DPOFUA */
+		buf += 8;
+		limit = 65535;		/* Should really be FSG_BUFLEN */
+	}
+
+	/* No block descriptors */
+
+	/* The mode pages, in numerical order.  The only page we support
+	 * is the Caching page. */
+	if (page_code == 0x08 || all_pages) {
+		valid_page = 1;
+		buf[0] = 0x08;		/* Page code */
+		buf[1] = 10;		/* Page length */
+		memset(buf+2, 0, 10);	/* None of the fields are changeable */
+
+		if (!changeable_values) {
+			buf[2] = 0x04;	/* Write cache enable, */
+					/* Read cache not disabled */
+					/* No cache retention priorities */
+			put_unaligned_be16(0xffff, &buf[4]);
+					/* Don't disable prefetch */
+					/* Minimum prefetch = 0 */
+			put_unaligned_be16(0xffff, &buf[8]);
+					/* Maximum prefetch */
+			put_unaligned_be16(0xffff, &buf[10]);
+					/* Maximum prefetch ceiling */
+		}
+		buf += 12;
+	}
+
+	/* Check that a valid page was requested and the mode data length
+	 * isn't too long. */
+	len = buf - buf0;
+	if (!valid_page || len > limit) {
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	/* Store the mode data length */
+	if (mscmnd == SC_MODE_SENSE_6)
+		buf0[0] = len - 1;
+	else
+		put_unaligned_be16(len - 2, buf0);
+	return len;
+}
+
+
+static int do_start_stop(struct fsg_common *common)
+{
+	if (!common->curlun) {
+		return -EINVAL;
+	} else if (!common->curlun->removable) {
+		common->curlun->sense_data = SS_INVALID_COMMAND;
+		return -EINVAL;
+	}
+	return 0;
+}
+
+
+static int do_prevent_allow(struct fsg_common *common)
+{
+	struct fsg_lun	*curlun = common->curlun;
+	int		prevent;
+
+	if (!common->curlun) {
+		return -EINVAL;
+	} else if (!common->curlun->removable) {
+		common->curlun->sense_data = SS_INVALID_COMMAND;
+		return -EINVAL;
+	}
+
+	prevent = common->cmnd[4] & 0x01;
+	if ((common->cmnd[4] & ~0x01) != 0) {	/* Mask away Prevent */
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	if (curlun->prevent_medium_removal && !prevent)
+		fsg_lun_fsync_sub(curlun);
+	curlun->prevent_medium_removal = prevent;
+	return 0;
+}
+
+
+static int do_read_format_capacities(struct fsg_common *common,
+			struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = common->curlun;
+	u8		*buf = (u8 *) bh->buf;
+
+	buf[0] = buf[1] = buf[2] = 0;
+	buf[3] = 8;	/* Only the Current/Maximum Capacity Descriptor */
+	buf += 4;
+
+	put_unaligned_be32(curlun->num_sectors, &buf[0]);
+						/* Number of blocks */
+	put_unaligned_be32(512, &buf[4]);	/* Block length */
+	buf[4] = 0x02;				/* Current capacity */
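+	/* Writing 0x02 into buf[4] overwrites the top byte of the
+	 * 32-bit value just stored, leaving the 24-bit block length
+	 * 0x000200 (512) in buf[5..7] as the descriptor format wants. */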
+	return 12;
+}
+
+
+static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = common->curlun;
+
+	/* We don't support MODE SELECT */
+	if (curlun)
+		curlun->sense_data = SS_INVALID_COMMAND;
+	return -EINVAL;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
+{
+	int	rc;
+
+	rc = fsg_set_halt(fsg, fsg->bulk_in);
+	if (rc == -EAGAIN)
+		VDBG(fsg, "delayed bulk-in endpoint halt\n");
+	while (rc != 0) {
+		if (rc != -EAGAIN) {
+			WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
+			rc = 0;
+			break;
+		}
+
+		/* Wait for a short time and then try again */
+		if (msleep_interruptible(100) != 0)
+			return -EINTR;
+		rc = usb_ep_set_halt(fsg->bulk_in);
+	}
+	return rc;
+}
+
+static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
+{
+	int	rc;
+
+	DBG(fsg, "bulk-in set wedge\n");
+	rc = usb_ep_set_wedge(fsg->bulk_in);
+	if (rc == -EAGAIN)
+		VDBG(fsg, "delayed bulk-in endpoint wedge\n");
+	while (rc != 0) {
+		if (rc != -EAGAIN) {
+			WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
+			rc = 0;
+			break;
+		}
+
+		/* Wait for a short time and then try again */
+		if (msleep_interruptible(100) != 0)
+			return -EINTR;
+		rc = usb_ep_set_wedge(fsg->bulk_in);
+	}
+	return rc;
+}
+
+static int pad_with_zeros(struct fsg_dev *fsg)
+{
+	struct fsg_buffhd	*bh = fsg->common->next_buffhd_to_fill;
+	u32			nkeep = bh->inreq->length;
+	u32			nsend;
+	int			rc;
+
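+	/* The buffer now being filled already holds nkeep bytes of
+	 * real data; everything after it, up to the full residue, is
+	 * sent as zeros. */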
+	bh->state = BUF_STATE_EMPTY;		/* For the first iteration */
+	fsg->common->usb_amount_left = nkeep + fsg->common->residue;
+	while (fsg->common->usb_amount_left > 0) {
+
+		/* Wait for the next buffer to be free */
+		while (bh->state != BUF_STATE_EMPTY) {
+			rc = sleep_thread(fsg->common);
+			if (rc)
+				return rc;
+		}
+
+		nsend = min(fsg->common->usb_amount_left, FSG_BUFLEN);
+		memset(bh->buf + nkeep, 0, nsend - nkeep);
+		bh->inreq->length = nsend;
+		bh->inreq->zero = 0;
+		start_transfer(fsg, fsg->bulk_in, bh->inreq,
+				&bh->inreq_busy, &bh->state);
+		bh = fsg->common->next_buffhd_to_fill = bh->next;
+		fsg->common->usb_amount_left -= nsend;
+		nkeep = 0;
+	}
+	return 0;
+}
+
+static int throw_away_data(struct fsg_common *common)
+{
+	struct fsg_buffhd	*bh;
+	u32			amount;
+	int			rc;
+
+	for (bh = common->next_buffhd_to_drain;
+	     bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0;
+	     bh = common->next_buffhd_to_drain) {
+
+		/* Throw away the data in a filled buffer */
+		if (bh->state == BUF_STATE_FULL) {
+			smp_rmb();
+			bh->state = BUF_STATE_EMPTY;
+			common->next_buffhd_to_drain = bh->next;
+
+			/* A short packet or an error ends everything */
+			if (bh->outreq->actual != bh->outreq->length ||
+					bh->outreq->status != 0) {
+				raise_exception(common,
+						FSG_STATE_ABORT_BULK_OUT);
+				return -EINTR;
+			}
+			continue;
+		}
+
+		/* Try to submit another request if we need one */
+		bh = common->next_buffhd_to_fill;
+		if (bh->state == BUF_STATE_EMPTY
+		 && common->usb_amount_left > 0) {
+			amount = min(common->usb_amount_left, FSG_BUFLEN);
+
+			/* amount is always divisible by 512, hence by
+			 * the bulk-out maxpacket size */
+			bh->outreq->length = amount;
+			bh->bulk_out_intended_length = amount;
+			bh->outreq->short_not_ok = 1;
+			START_TRANSFER_OR(common, bulk_out, bh->outreq,
+					  &bh->outreq_busy, &bh->state)
+				/* Don't know what to do if
+				 * common->fsg is NULL */
+				return -EIO;
+			common->next_buffhd_to_fill = bh->next;
+			common->usb_amount_left -= amount;
+			continue;
+		}
+
+		/* Otherwise wait for something to happen */
+		rc = sleep_thread(common);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+
+static int finish_reply(struct fsg_common *common)
+{
+	struct fsg_buffhd	*bh = common->next_buffhd_to_fill;
+	int			rc = 0;
+
+	switch (common->data_dir) {
+	case DATA_DIR_NONE:
+		break;			/* Nothing to send */
+
+	/* If we don't know whether the host wants to read or write,
+	 * this must be CB or CBI with an unknown command.  We mustn't
+	 * try to send or receive any data.  So stall both bulk pipes
+	 * if we can and wait for a reset. */
+	case DATA_DIR_UNKNOWN:
+		if (!common->can_stall) {
+			/* Nothing */
+		} else if (fsg_is_set(common)) {
+			fsg_set_halt(common->fsg, common->fsg->bulk_out);
+			rc = halt_bulk_in_endpoint(common->fsg);
+		} else {
+			/* Don't know what to do if common->fsg is NULL */
+			rc = -EIO;
+		}
+		break;
+
+	/* All but the last buffer of data must have already been sent */
+	case DATA_DIR_TO_HOST:
+		if (common->data_size == 0) {
+			/* Nothing to send */
+
+		/* If there's no residue, simply send the last buffer */
+		} else if (common->residue == 0) {
+			bh->inreq->zero = 0;
+			START_TRANSFER_OR(common, bulk_in, bh->inreq,
+					  &bh->inreq_busy, &bh->state)
+				return -EIO;
+			common->next_buffhd_to_fill = bh->next;
+
+		/* For Bulk-only, if we're allowed to stall then send the
+		 * short packet and halt the bulk-in endpoint.  If we can't
+		 * stall, pad out the remaining data with 0's. */
+		} else if (common->can_stall) {
+			bh->inreq->zero = 1;
+			START_TRANSFER_OR(common, bulk_in, bh->inreq,
+					  &bh->inreq_busy, &bh->state)
+				/* Don't know what to do if
+				 * common->fsg is NULL */
+				rc = -EIO;
+			common->next_buffhd_to_fill = bh->next;
+			if (common->fsg)
+				rc = halt_bulk_in_endpoint(common->fsg);
+		} else if (fsg_is_set(common)) {
+			rc = pad_with_zeros(common->fsg);
+		} else {
+			/* Don't know what to do if common->fsg is NULL */
+			rc = -EIO;
+		}
+		break;
+
+	/* We have processed all we want from the data the host has sent.
+	 * There may still be outstanding bulk-out requests. */
+	case DATA_DIR_FROM_HOST:
+		if (common->residue == 0) {
+			/* Nothing to receive */
+
+		/* Did the host stop sending unexpectedly early? */
+		} else if (common->short_packet_received) {
+			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
+			rc = -EINTR;
+
+		/* We haven't processed all the incoming data.  Even though
+		 * we may be allowed to stall, doing so would cause a race.
+		 * The controller may already have ACK'ed all the remaining
+		 * bulk-out packets, in which case the host wouldn't see a
+		 * STALL.  Not realizing the endpoint was halted, it wouldn't
+		 * clear the halt -- leading to problems later on. */
+#if 0
+		} else if (common->can_stall) {
+			if (fsg_is_set(common))
+				fsg_set_halt(common->fsg,
+					     common->fsg->bulk_out);
+			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
+			rc = -EINTR;
+#endif
+
+		/* We can't stall.  Read in the excess data and throw it
+		 * all away. */
+		} else {
+			rc = throw_away_data(common);
+		}
+		break;
+	}
+	return rc;
+}
+
+
+static int send_status(struct fsg_common *common)
+{
+	struct fsg_lun		*curlun = common->curlun;
+	struct fsg_buffhd	*bh;
+	struct bulk_cs_wrap	*csw;
+	int			rc;
+	u8			status = USB_STATUS_PASS;
+	u32			sd, sdinfo = 0;
+
+	/* Wait for the next buffer to become available */
+	bh = common->next_buffhd_to_fill;
+	while (bh->state != BUF_STATE_EMPTY) {
+		rc = sleep_thread(common);
+		if (rc)
+			return rc;
+	}
+
+	if (curlun) {
+		sd = curlun->sense_data;
+		sdinfo = curlun->sense_data_info;
+	} else if (common->bad_lun_okay)
+		sd = SS_NO_SENSE;
+	else
+		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
+
+	if (common->phase_error) {
+		DBG(common, "sending phase-error status\n");
+		status = USB_STATUS_PHASE_ERROR;
+		sd = SS_INVALID_COMMAND;
+	} else if (sd != SS_NO_SENSE) {
+		DBG(common, "sending command-failure status\n");
+		status = USB_STATUS_FAIL;
+		VDBG(common, "  sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
+				"  info x%x\n",
+				SK(sd), ASC(sd), ASCQ(sd), sdinfo);
+	}
+
+	/* Store and send the Bulk-only CSW */
+	csw = (void *)bh->buf;
+
+	csw->Signature = cpu_to_le32(USB_BULK_CS_SIG);
+	csw->Tag = common->tag;
+	csw->Residue = cpu_to_le32(common->residue);
+	csw->Status = status;
+
+	bh->inreq->length = USB_BULK_CS_WRAP_LEN;
+	bh->inreq->zero = 0;
+	START_TRANSFER_OR(common, bulk_in, bh->inreq,
+			  &bh->inreq_busy, &bh->state)
+		/* Don't know what to do if common->fsg is NULL */
+		return -EIO;
+
+	common->next_buffhd_to_fill = bh->next;
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Check whether the command is properly formed and whether its data size
+ * and direction agree with the values we already have. */
+static int check_command(struct fsg_common *common, int cmnd_size,
+		enum data_direction data_dir, unsigned int mask,
+		int needs_medium, const char *name)
+{
+	int			i;
+	int			lun = common->cmnd[1] >> 5;
+	static const char	dirletter[4] = {'u', 'o', 'i', 'n'};
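+	/* dirletter[] is indexed by enum data_direction: unknown, out
+	 * (from the host), in (to the host), none. */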
+	char			hdlen[20];
+	struct fsg_lun		*curlun;
+
+	hdlen[0] = 0;
+	if (common->data_dir != DATA_DIR_UNKNOWN)
+		sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir],
+				common->data_size);
+	VDBG(common, "SCSI command: %s;  Dc=%d, D%c=%u;  Hc=%d%s\n",
+	     name, cmnd_size, dirletter[(int) data_dir],
+	     common->data_size_from_cmnd, common->cmnd_size, hdlen);
+
+	/* We can't reply at all until we know the correct data direction
+	 * and size. */
+	if (common->data_size_from_cmnd == 0)
+		data_dir = DATA_DIR_NONE;
+	if (common->data_size < common->data_size_from_cmnd) {
+		/* Host data size < Device data size is a phase error.
+		 * Carry out the command, but only transfer as much as
+		 * we are allowed. */
+		common->data_size_from_cmnd = common->data_size;
+		common->phase_error = 1;
+	}
+	common->residue = common->data_size;
+	common->usb_amount_left = common->data_size;
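+	/* residue counts down as data is actually transferred; whatever
+	 * is left over is reported back to the host in the CSW. */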
+
+	/* Conflicting data directions is a phase error */
+	if (common->data_dir != data_dir
+	 && common->data_size_from_cmnd > 0) {
+		common->phase_error = 1;
+		return -EINVAL;
+	}
+
+	/* Verify the length of the command itself */
+	if (cmnd_size != common->cmnd_size) {
+
+		/* Special case workaround: There are plenty of buggy SCSI
+		 * implementations.  Many get the cbw->Length field wrong,
+		 * passing an incorrect command size.  For those cases we
+		 * always try to work around the problem by using the length
+		 * sent by the host side provided it is at least as large
+		 * as the correct command length.
+		 * Examples of such cases would be MS-Windows, which issues
+		 * REQUEST SENSE with cbw->Length == 12 where it should
+		 * be 6, and the Xbox 360, which issues INQUIRY, TEST UNIT
+		 * READY and REQUEST SENSE with cbw->Length == 10 where it
+		 * should likewise be 6.
+		 */
+		if (cmnd_size <= common->cmnd_size) {
+			DBG(common, "%s is buggy! Expected length %d "
+			    "but we got %d\n", name,
+			    cmnd_size, common->cmnd_size);
+			cmnd_size = common->cmnd_size;
+		} else {
+			common->phase_error = 1;
+			return -EINVAL;
+		}
+	}
+
+	/* Check that the LUN values are consistent */
+	if (common->lun != lun)
+		DBG(common, "using LUN %d from CBW, not LUN %d from CDB\n",
+		    common->lun, lun);
+
+	/* Check the LUN */
+	if (common->lun >= 0 && common->lun < common->nluns) {
+		curlun = &common->luns[common->lun];
+		common->curlun = curlun;
+		if (common->cmnd[0] != SC_REQUEST_SENSE) {
+			curlun->sense_data = SS_NO_SENSE;
+			curlun->sense_data_info = 0;
+			curlun->info_valid = 0;
+		}
+	} else {
+		common->curlun = NULL;
+		curlun = NULL;
+		common->bad_lun_okay = 0;
+
+		/* INQUIRY and REQUEST SENSE commands are explicitly allowed
+		 * to use unsupported LUNs; all others may not. */
+		if (common->cmnd[0] != SC_INQUIRY &&
+		    common->cmnd[0] != SC_REQUEST_SENSE) {
+			DBG(common, "unsupported LUN %d\n", common->lun);
+			return -EINVAL;
+		}
+	}
+
+	/* If a unit attention condition exists, only INQUIRY and
+	 * REQUEST SENSE commands are allowed; anything else must fail. */
+	if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
+			common->cmnd[0] != SC_INQUIRY &&
+			common->cmnd[0] != SC_REQUEST_SENSE) {
+		curlun->sense_data = curlun->unit_attention_data;
+		curlun->unit_attention_data = SS_NO_SENSE;
+		return -EINVAL;
+	}
+
+	/* Check that only command bytes listed in the mask are non-zero */
+	common->cmnd[1] &= 0x1f;			/* Mask away the LUN */
+	for (i = 1; i < cmnd_size; ++i) {
+		if (common->cmnd[i] && !(mask & (1 << i))) {
+			if (curlun)
+				curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+			return -EINVAL;
+		}
+	}
+
+	/* If the medium isn't mounted and the command needs to access
+	 * it, return an error. */
+	if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
+		curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+
+static int do_scsi_command(struct fsg_common *common)
+{
+	struct fsg_buffhd	*bh;
+	int			rc;
+	int			reply = -EINVAL;
+	int			i;
+	static char		unknown[16];
+
+	dump_cdb(common);
+
+	/* Wait for the next buffer to become available for data or status */
+	bh = common->next_buffhd_to_fill;
+	common->next_buffhd_to_drain = bh;
+	while (bh->state != BUF_STATE_EMPTY) {
+		rc = sleep_thread(common);
+		if (rc)
+			return rc;
+	}
+	common->phase_error = 0;
+	common->short_packet_received = 0;
+
+	down_read(&common->filesem);	/* We're using the backing file */
+	switch (common->cmnd[0]) {
+
+	case SC_INQUIRY:
+		common->data_size_from_cmnd = common->cmnd[4];
+		reply = check_command(common, 6, DATA_DIR_TO_HOST,
+				      (1<<4), 0,
+				      "INQUIRY");
+		if (reply == 0)
+			reply = do_inquiry(common, bh);
+		break;
+
+	case SC_MODE_SELECT_6:
+		common->data_size_from_cmnd = common->cmnd[4];
+		reply = check_command(common, 6, DATA_DIR_FROM_HOST,
+				      (1<<1) | (1<<4), 0,
+				      "MODE SELECT(6)");
+		if (reply == 0)
+			reply = do_mode_select(common, bh);
+		break;
+
+	case SC_MODE_SELECT_10:
+		common->data_size_from_cmnd =
+			get_unaligned_be16(&common->cmnd[7]);
+		reply = check_command(common, 10, DATA_DIR_FROM_HOST,
+				      (1<<1) | (3<<7), 0,
+				      "MODE SELECT(10)");
+		if (reply == 0)
+			reply = do_mode_select(common, bh);
+		break;
+
+	case SC_MODE_SENSE_6:
+		common->data_size_from_cmnd = common->cmnd[4];
+		reply = check_command(common, 6, DATA_DIR_TO_HOST,
+				      (1<<1) | (1<<2) | (1<<4), 0,
+				      "MODE SENSE(6)");
+		if (reply == 0)
+			reply = do_mode_sense(common, bh);
+		break;
+
+	case SC_MODE_SENSE_10:
+		common->data_size_from_cmnd =
+			get_unaligned_be16(&common->cmnd[7]);
+		reply = check_command(common, 10, DATA_DIR_TO_HOST,
+				      (1<<1) | (1<<2) | (3<<7), 0,
+				      "MODE SENSE(10)");
+		if (reply == 0)
+			reply = do_mode_sense(common, bh);
+		break;
+
+	case SC_PREVENT_ALLOW_MEDIUM_REMOVAL:
+		common->data_size_from_cmnd = 0;
+		reply = check_command(common, 6, DATA_DIR_NONE,
+				      (1<<4), 0,
+				      "PREVENT-ALLOW MEDIUM REMOVAL");
+		if (reply == 0)
+			reply = do_prevent_allow(common);
+		break;
+
+	case SC_READ_6:
+		i = common->cmnd[4];
+		common->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
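+		/* In READ(6), and likewise WRITE(6) below, a transfer
+		 * length of 0 means 256 blocks. */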
+		reply = check_command(common, 6, DATA_DIR_TO_HOST,
+				      (7<<1) | (1<<4), 1,
+				      "READ(6)");
+		if (reply == 0)
+			reply = do_read(common);
+		break;
+
+	case SC_READ_10:
+		common->data_size_from_cmnd =
+				get_unaligned_be16(&common->cmnd[7]) << 9;
+		reply = check_command(common, 10, DATA_DIR_TO_HOST,
+				      (1<<1) | (0xf<<2) | (3<<7), 1,
+				      "READ(10)");
+		if (reply == 0)
+			reply = do_read(common);
+		break;
+
+	case SC_READ_12:
+		common->data_size_from_cmnd =
+				get_unaligned_be32(&common->cmnd[6]) << 9;
+		reply = check_command(common, 12, DATA_DIR_TO_HOST,
+				      (1<<1) | (0xf<<2) | (0xf<<6), 1,
+				      "READ(12)");
+		if (reply == 0)
+			reply = do_read(common);
+		break;
+
+	case SC_READ_CAPACITY:
+		common->data_size_from_cmnd = 8;
+		reply = check_command(common, 10, DATA_DIR_TO_HOST,
+				      (0xf<<2) | (1<<8), 1,
+				      "READ CAPACITY");
+		if (reply == 0)
+			reply = do_read_capacity(common, bh);
+		break;
+
+	case SC_READ_HEADER:
+		if (!common->curlun || !common->curlun->cdrom)
+			goto unknown_cmnd;
+		common->data_size_from_cmnd =
+			get_unaligned_be16(&common->cmnd[7]);
+		reply = check_command(common, 10, DATA_DIR_TO_HOST,
+				      (3<<7) | (0x1f<<1), 1,
+				      "READ HEADER");
+		if (reply == 0)
+			reply = do_read_header(common, bh);
+		break;
+
+	case SC_READ_TOC:
+		if (!common->curlun || !common->curlun->cdrom)
+			goto unknown_cmnd;
+		common->data_size_from_cmnd =
+			get_unaligned_be16(&common->cmnd[7]);
+		reply = check_command(common, 10, DATA_DIR_TO_HOST,
+				      (7<<6) | (1<<1), 1,
+				      "READ TOC");
+		if (reply == 0)
+			reply = do_read_toc(common, bh);
+		break;
+
+	case SC_READ_FORMAT_CAPACITIES:
+		common->data_size_from_cmnd =
+			get_unaligned_be16(&common->cmnd[7]);
+		reply = check_command(common, 10, DATA_DIR_TO_HOST,
+				      (3<<7), 1,
+				      "READ FORMAT CAPACITIES");
+		if (reply == 0)
+			reply = do_read_format_capacities(common, bh);
+		break;
+
+	case SC_REQUEST_SENSE:
+		common->data_size_from_cmnd = common->cmnd[4];
+		reply = check_command(common, 6, DATA_DIR_TO_HOST,
+				      (1<<4), 0,
+				      "REQUEST SENSE");
+		if (reply == 0)
+			reply = do_request_sense(common, bh);
+		break;
+
+	case SC_START_STOP_UNIT:
+		common->data_size_from_cmnd = 0;
+		reply = check_command(common, 6, DATA_DIR_NONE,
+				      (1<<1) | (1<<4), 0,
+				      "START-STOP UNIT");
+		if (reply == 0)
+			reply = do_start_stop(common);
+		break;
+
+	case SC_SYNCHRONIZE_CACHE:
+		common->data_size_from_cmnd = 0;
+		reply = check_command(common, 10, DATA_DIR_NONE,
+				      (0xf<<2) | (3<<7), 1,
+				      "SYNCHRONIZE CACHE");
+		if (reply == 0)
+			reply = do_synchronize_cache(common);
+		break;
+
+	case SC_TEST_UNIT_READY:
+		common->data_size_from_cmnd = 0;
+		reply = check_command(common, 6, DATA_DIR_NONE,
+				0, 1,
+				"TEST UNIT READY");
+		break;
+
+	/* Although optional, this command is used by MS-Windows.  We
+	 * support a minimal version: BytChk must be 0. */
+	case SC_VERIFY:
+		common->data_size_from_cmnd = 0;
+		reply = check_command(common, 10, DATA_DIR_NONE,
+				      (1<<1) | (0xf<<2) | (3<<7), 1,
+				      "VERIFY");
+		if (reply == 0)
+			reply = do_verify(common);
+		break;
+
+	case SC_WRITE_6:
+		i = common->cmnd[4];
+		common->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
+		reply = check_command(common, 6, DATA_DIR_FROM_HOST,
+				      (7<<1) | (1<<4), 1,
+				      "WRITE(6)");
+		if (reply == 0)
+			reply = do_write(common);
+		break;
+
+	case SC_WRITE_10:
+		common->data_size_from_cmnd =
+				get_unaligned_be16(&common->cmnd[7]) << 9;
+		reply = check_command(common, 10, DATA_DIR_FROM_HOST,
+				      (1<<1) | (0xf<<2) | (3<<7), 1,
+				      "WRITE(10)");
+		if (reply == 0)
+			reply = do_write(common);
+		break;
+
+	case SC_WRITE_12:
+		common->data_size_from_cmnd =
+				get_unaligned_be32(&common->cmnd[6]) << 9;
+		reply = check_command(common, 12, DATA_DIR_FROM_HOST,
+				      (1<<1) | (0xf<<2) | (0xf<<6), 1,
+				      "WRITE(12)");
+		if (reply == 0)
+			reply = do_write(common);
+		break;
+
+	/* Some mandatory commands that we recognize but don't implement.
+	 * They don't mean much in this setting.  It's left as an exercise
+	 * for anyone interested to implement RESERVE and RELEASE in terms
+	 * of POSIX locks. */
+	case SC_FORMAT_UNIT:
+	case SC_RELEASE:
+	case SC_RESERVE:
+	case SC_SEND_DIAGNOSTIC:
+		/* Fall through */
+
+	default:
+unknown_cmnd:
+		common->data_size_from_cmnd = 0;
+		sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
+		reply = check_command(common, common->cmnd_size,
+				      DATA_DIR_UNKNOWN, 0xff, 0, unknown);
+		if (reply == 0) {
+			common->curlun->sense_data = SS_INVALID_COMMAND;
+			reply = -EINVAL;
+		}
+		break;
+	}
+	up_read(&common->filesem);
+
+	if (reply == -EINTR || signal_pending(current))
+		return -EINTR;
+
+	/* Set up the single reply buffer for finish_reply() */
+	if (reply == -EINVAL)
+		reply = 0;		/* Error reply length */
+	if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) {
+		reply = min((u32) reply, common->data_size_from_cmnd);
+		bh->inreq->length = reply;
+		bh->state = BUF_STATE_FULL;
+		common->residue -= reply;
+	}				/* Otherwise it's already set */
+
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct usb_request	*req = bh->outreq;
+	struct fsg_bulk_cb_wrap	*cbw = req->buf;
+	struct fsg_common	*common = fsg->common;
+
+	/* Was this a real packet?  Should it be ignored? */
+	if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
+		return -EINVAL;
+
+	/* Is the CBW valid? */
+	if (req->actual != USB_BULK_CB_WRAP_LEN ||
+			cbw->Signature != cpu_to_le32(
+				USB_BULK_CB_SIG)) {
+		DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
+				req->actual,
+				le32_to_cpu(cbw->Signature));
+
+		/* The Bulk-only spec says we MUST stall the IN endpoint
+		 * (6.6.1), so it's unavoidable.  It also says we must
+		 * retain this state until the next reset, but there's
+		 * no way to tell the controller driver it should ignore
+		 * Clear-Feature(HALT) requests.
+		 *
+		 * We aren't required to halt the OUT endpoint; instead
+		 * we can simply accept and discard any data received
+		 * until the next reset. */
+		wedge_bulk_in_endpoint(fsg);
+		set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
+		return -EINVAL;
+	}
+
+	/* Is the CBW meaningful? */
+	if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
+			cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
+		DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
+				"cmdlen %u\n",
+				cbw->Lun, cbw->Flags, cbw->Length);
+
+		/* We can do anything we want here, so let's stall the
+		 * bulk pipes if we are allowed to. */
+		if (common->can_stall) {
+			fsg_set_halt(fsg, fsg->bulk_out);
+			halt_bulk_in_endpoint(fsg);
+		}
+		return -EINVAL;
+	}
+
+	/* Save the command for later */
+	common->cmnd_size = cbw->Length;
+	memcpy(common->cmnd, cbw->CDB, common->cmnd_size);
+	if (cbw->Flags & USB_BULK_IN_FLAG)
+		common->data_dir = DATA_DIR_TO_HOST;
+	else
+		common->data_dir = DATA_DIR_FROM_HOST;
+	common->data_size = le32_to_cpu(cbw->DataTransferLength);
+	if (common->data_size == 0)
+		common->data_dir = DATA_DIR_NONE;
+	common->lun = cbw->Lun;
+	common->tag = cbw->Tag;
+	return 0;
+}
+
+
+static int get_next_command(struct fsg_common *common)
+{
+	struct fsg_buffhd	*bh;
+	int			rc = 0;
+
+	/* Wait for the next buffer to become available */
+	bh = common->next_buffhd_to_fill;
+	while (bh->state != BUF_STATE_EMPTY) {
+		rc = sleep_thread(common);
+		if (rc)
+			return rc;
+	}
+
+	/* Queue a request to read a Bulk-only CBW */
+	set_bulk_out_req_length(common, bh, USB_BULK_CB_WRAP_LEN);
+	bh->outreq->short_not_ok = 1;
+	START_TRANSFER_OR(common, bulk_out, bh->outreq,
+			  &bh->outreq_busy, &bh->state)
+		/* Don't know what to do if common->fsg is NULL */
+		return -EIO;
+
+	/* We will drain the buffer in software, which means we
+	 * can reuse it for the next filling.  No need to advance
+	 * next_buffhd_to_fill. */
+
+	/* Wait for the CBW to arrive */
+	while (bh->state != BUF_STATE_FULL) {
+		rc = sleep_thread(common);
+		if (rc)
+			return rc;
+	}
+	smp_rmb();
+	rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO;
+	bh->state = BUF_STATE_EMPTY;
+
+	return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int enable_endpoint(struct fsg_common *common, struct usb_ep *ep,
+		const struct usb_endpoint_descriptor *d)
+{
+	int	rc;
+
+	ep->driver_data = common;
+	rc = usb_ep_enable(ep, d);
+	if (rc)
+		ERROR(common, "can't enable %s, result %d\n", ep->name, rc);
+	return rc;
+}
+
+static int alloc_request(struct fsg_common *common, struct usb_ep *ep,
+		struct usb_request **preq)
+{
+	*preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
+	if (*preq)
+		return 0;
+	ERROR(common, "can't allocate request for %s\n", ep->name);
+	return -ENOMEM;
+}
+
+/*
+ * Reset interface setting and re-init endpoint state (toggle etc).
+ * Call with altsetting < 0 to disable the interface.  The only other
+ * available altsetting is 0, which enables the interface.
+ */
+static int do_set_interface(struct fsg_common *common, int altsetting)
+{
+	int	rc = 0;
+	int	i;
+	const struct usb_endpoint_descriptor	*d;
+
+	if (common->running)
+		DBG(common, "reset interface\n");
+
+reset:
+	/* Deallocate the requests */
+	if (common->prev_fsg) {
+		struct fsg_dev *fsg = common->prev_fsg;
+
+		for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
+			struct fsg_buffhd *bh = &common->buffhds[i];
+
+			if (bh->inreq) {
+				usb_ep_free_request(fsg->bulk_in, bh->inreq);
+				bh->inreq = NULL;
+			}
+			if (bh->outreq) {
+				usb_ep_free_request(fsg->bulk_out, bh->outreq);
+				bh->outreq = NULL;
+			}
+		}
+
+		/* Disable the endpoints */
+		if (fsg->bulk_in_enabled) {
+			usb_ep_disable(fsg->bulk_in);
+			fsg->bulk_in_enabled = 0;
+		}
+		if (fsg->bulk_out_enabled) {
+			usb_ep_disable(fsg->bulk_out);
+			fsg->bulk_out_enabled = 0;
+		}
+
+		common->prev_fsg = NULL;
+	}
+
+	common->running = 0;
+	if (altsetting < 0 || rc != 0)
+		return rc;
+
+	DBG(common, "set interface %d\n", altsetting);
+
+	if (fsg_is_set(common)) {
+		struct fsg_dev *fsg = common->fsg;
+		common->prev_fsg = common->fsg;
+
+		/* Enable the endpoints */
+		d = fsg_ep_desc(common->gadget,
+				&fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
+		rc = enable_endpoint(common, fsg->bulk_in, d);
+		if (rc)
+			goto reset;
+		fsg->bulk_in_enabled = 1;
+
+		d = fsg_ep_desc(common->gadget,
+				&fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
+		rc = enable_endpoint(common, fsg->bulk_out, d);
+		if (rc)
+			goto reset;
+		fsg->bulk_out_enabled = 1;
+		common->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
+		clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
+
+		/* Allocate the requests */
+		for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
+			struct fsg_buffhd	*bh = &common->buffhds[i];
+
+			rc = alloc_request(common, fsg->bulk_in, &bh->inreq);
+			if (rc)
+				goto reset;
+			rc = alloc_request(common, fsg->bulk_out, &bh->outreq);
+			if (rc)
+				goto reset;
+			bh->inreq->buf = bh->outreq->buf = bh->buf;
+			bh->inreq->context = bh->outreq->context = bh;
+			bh->inreq->complete = bulk_in_complete;
+			bh->outreq->complete = bulk_out_complete;
+		}
+
+		common->running = 1;
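+		/* Flag a unit attention on every LUN so the host can
+		 * tell that the device state has been reset. */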
+		for (i = 0; i < common->nluns; ++i)
+			common->luns[i].unit_attention_data = SS_RESET_OCCURRED;
+		return rc;
+	} else {
+		return -EIO;
+	}
+}
+
+
+/*
+ * Change our operational configuration.  This code must agree with the code
+ * that returns config descriptors, and with interface altsetting code.
+ *
+ * It's also responsible for power management interactions.  Some
+ * configurations might not work with our current power sources.
+ * For now we just assume the gadget is always self-powered.
+ */
+static int do_set_config(struct fsg_common *common, u8 new_config)
+{
+	int	rc = 0;
+
+	/* Disable the single interface */
+	if (common->config != 0) {
+		DBG(common, "reset config\n");
+		common->config = 0;
+		rc = do_set_interface(common, -1);
+	}
+
+	/* Enable the interface */
+	if (new_config != 0) {
+		common->config = new_config;
+		rc = do_set_interface(common, 0);
+		if (rc != 0)
+			common->config = 0;	/* Reset on errors */
+	}
+	return rc;
+}
+
+
+/****************************** ALT CONFIGS ******************************/
+
+
+static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct fsg_dev *fsg = fsg_from_func(f);
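+	/* Hand the actual reconfiguration over to the worker thread:
+	 * raising FSG_STATE_CONFIG_CHANGE makes handle_exception()
+	 * call do_set_config() on our behalf. */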
+	fsg->common->prev_fsg = fsg->common->fsg;
+	fsg->common->fsg = fsg;
+	fsg->common->new_config = 1;
+	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+	return 0;
+}
+
+static void fsg_disable(struct usb_function *f)
+{
+	struct fsg_dev *fsg = fsg_from_func(f);
+	fsg->common->prev_fsg = fsg->common->fsg;
+	fsg->common->fsg = fsg;
+	fsg->common->new_config = 0;
+	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static void handle_exception(struct fsg_common *common)
+{
+	siginfo_t		info;
+	int			sig;
+	int			i;
+	struct fsg_buffhd	*bh;
+	enum fsg_state		old_state;
+	u8			new_config;
+	struct fsg_lun		*curlun;
+	unsigned int		exception_req_tag;
+	int			rc;
+
+	/* Clear the existing signals.  Anything but SIGUSR1 is converted
+	 * into a high-priority EXIT exception. */
+	for (;;) {
+		sig = dequeue_signal_lock(current, &current->blocked, &info);
+		if (!sig)
+			break;
+		if (sig != SIGUSR1) {
+			if (common->state < FSG_STATE_EXIT)
+				DBG(common, "Main thread exiting on signal\n");
+			raise_exception(common, FSG_STATE_EXIT);
+		}
+	}
+
+	/* Cancel all the pending transfers */
+	if (fsg_is_set(common)) {
+		for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
+			bh = &common->buffhds[i];
+			if (bh->inreq_busy)
+				usb_ep_dequeue(common->fsg->bulk_in, bh->inreq);
+			if (bh->outreq_busy)
+				usb_ep_dequeue(common->fsg->bulk_out,
+					       bh->outreq);
+		}
+
+		/* Wait until everything is idle */
+		for (;;) {
+			int num_active = 0;
+			for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
+				bh = &common->buffhds[i];
+				num_active += bh->inreq_busy + bh->outreq_busy;
+			}
+			if (num_active == 0)
+				break;
+			if (sleep_thread(common))
+				return;
+		}
+
+		/* Clear out the controller's fifos */
+		if (common->fsg->bulk_in_enabled)
+			usb_ep_fifo_flush(common->fsg->bulk_in);
+		if (common->fsg->bulk_out_enabled)
+			usb_ep_fifo_flush(common->fsg->bulk_out);
+	}
+
+	/* Reset the I/O buffer states and pointers, the SCSI
+	 * state, and the exception.  Then invoke the handler. */
+	spin_lock_irq(&common->lock);
+
+	for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
+		bh = &common->buffhds[i];
+		bh->state = BUF_STATE_EMPTY;
+	}
+	common->next_buffhd_to_fill = &common->buffhds[0];
+	common->next_buffhd_to_drain = &common->buffhds[0];
+	exception_req_tag = common->exception_req_tag;
+	new_config = common->new_config;
+	old_state = common->state;
+
+	if (old_state == FSG_STATE_ABORT_BULK_OUT)
+		common->state = FSG_STATE_STATUS_PHASE;
+	else {
+		for (i = 0; i < common->nluns; ++i) {
+			curlun = &common->luns[i];
+			curlun->prevent_medium_removal = 0;
+			curlun->sense_data = SS_NO_SENSE;
+			curlun->unit_attention_data = SS_NO_SENSE;
+			curlun->sense_data_info = 0;
+			curlun->info_valid = 0;
+		}
+		common->state = FSG_STATE_IDLE;
+	}
+	spin_unlock_irq(&common->lock);
+
+	/* Carry out any extra actions required for the exception */
+	switch (old_state) {
+	case FSG_STATE_ABORT_BULK_OUT:
+		send_status(common);
+		spin_lock_irq(&common->lock);
+		if (common->state == FSG_STATE_STATUS_PHASE)
+			common->state = FSG_STATE_IDLE;
+		spin_unlock_irq(&common->lock);
+		break;
+
+	case FSG_STATE_RESET:
+		/* In case we were forced against our will to halt a
+		 * bulk endpoint, clear the halt now.  (The SuperH UDC
+		 * requires this.) */
+		if (!fsg_is_set(common))
+			break;
+		if (test_and_clear_bit(IGNORE_BULK_OUT,
+				       &common->fsg->atomic_bitflags))
+			usb_ep_clear_halt(common->fsg->bulk_in);
+
+		if (common->ep0_req_tag == exception_req_tag)
+			ep0_queue(common);	/* Complete the status stage */
+
+		/* Technically this should go here, but it would only be
+		 * a waste of time.  Ditto for the INTERFACE_CHANGE and
+		 * CONFIG_CHANGE cases. */
+		/* for (i = 0; i < common->nluns; ++i) */
+		/*	common->luns[i].unit_attention_data = */
+		/*		SS_RESET_OCCURRED;  */
+		break;
+
+	case FSG_STATE_CONFIG_CHANGE:
+		rc = do_set_config(common, new_config);
+		if (common->ep0_req_tag != exception_req_tag)
+			break;
+		if (rc != 0) {			/* STALL on errors */
+			DBG(common, "ep0 set halt\n");
+			usb_ep_set_halt(common->ep0);
+		} else {			/* Complete the status stage */
+			ep0_queue(common);
+		}
+		break;
+
+	case FSG_STATE_EXIT:
+	case FSG_STATE_TERMINATED:
+		do_set_config(common, 0);		/* Free resources */
+		spin_lock_irq(&common->lock);
+		common->state = FSG_STATE_TERMINATED;	/* Stop the thread */
+		spin_unlock_irq(&common->lock);
+		break;
+
+	case FSG_STATE_INTERFACE_CHANGE:
+	case FSG_STATE_DISCONNECT:
+	case FSG_STATE_COMMAND_PHASE:
+	case FSG_STATE_DATA_PHASE:
+	case FSG_STATE_STATUS_PHASE:
+	case FSG_STATE_IDLE:
+		break;
+	}
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int fsg_main_thread(void *common_)
+{
+	struct fsg_common	*common = common_;
+
+	/* Allow the thread to be killed by a signal, but set the signal mask
+	 * to block everything but INT, TERM, KILL, and USR1. */
+	allow_signal(SIGINT);
+	allow_signal(SIGTERM);
+	allow_signal(SIGKILL);
+	allow_signal(SIGUSR1);
+
+	/* Allow the thread to be frozen */
+	set_freezable();
+
+	/* Arrange for userspace references to be interpreted as kernel
+	 * pointers.  That way we can pass a kernel pointer to a routine
+	 * that expects a __user pointer and it will work okay. */
+	set_fs(get_ds());
+
+	/* The main loop */
+	while (common->state != FSG_STATE_TERMINATED) {
+		if (exception_in_progress(common) || signal_pending(current)) {
+			handle_exception(common);
+			continue;
+		}
+
+		if (!common->running) {
+			sleep_thread(common);
+			continue;
+		}
+
+		if (get_next_command(common))
+			continue;
+
+		spin_lock_irq(&common->lock);
+		if (!exception_in_progress(common))
+			common->state = FSG_STATE_DATA_PHASE;
+		spin_unlock_irq(&common->lock);
+
+		if (do_scsi_command(common) || finish_reply(common))
+			continue;
+
+		spin_lock_irq(&common->lock);
+		if (!exception_in_progress(common))
+			common->state = FSG_STATE_STATUS_PHASE;
+		spin_unlock_irq(&common->lock);
+
+		if (send_status(common))
+			continue;
+
+		spin_lock_irq(&common->lock);
+		if (!exception_in_progress(common))
+			common->state = FSG_STATE_IDLE;
+		spin_unlock_irq(&common->lock);
+	}
+
+	spin_lock_irq(&common->lock);
+	common->thread_task = NULL;
+	spin_unlock_irq(&common->lock);
+
+	if (common->thread_exits)
+		common->thread_exits(common);
+
+	/* Let the unbind and cleanup routines know the thread has exited */
+	complete_and_exit(&common->thread_notifier, 0);
+}
+
+
+/*************************** DEVICE ATTRIBUTES ***************************/
+
+/* Write permission is checked per LUN in store_*() functions. */
+static DEVICE_ATTR(ro, 0644, fsg_show_ro, fsg_store_ro);
+static DEVICE_ATTR(file, 0644, fsg_show_file, fsg_store_file);
+
+
+/****************************** FSG COMMON ******************************/
+
+static void fsg_common_release(struct kref *ref);
+
+static void fsg_lun_release(struct device *dev)
+{
+	/* Nothing needs to be done */
+}
+
+static inline void fsg_common_get(struct fsg_common *common)
+{
+	kref_get(&common->ref);
+}
+
+static inline void fsg_common_put(struct fsg_common *common)
+{
+	kref_put(&common->ref, fsg_common_release);
+}
+
+
+static struct fsg_common *fsg_common_init(struct fsg_common *common,
+					  struct usb_composite_dev *cdev,
+					  struct fsg_config *cfg)
+{
+	struct usb_gadget *gadget = cdev->gadget;
+	struct fsg_buffhd *bh;
+	struct fsg_lun *curlun;
+	struct fsg_lun_config *lcfg;
+	int nluns, i, rc;
+	char *pathbuf;
+
+	/* Find out how many LUNs there should be */
+	nluns = cfg->nluns;
+	if (nluns < 1 || nluns > FSG_MAX_LUNS) {
+		dev_err(&gadget->dev, "invalid number of LUNs: %d\n", nluns);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Allocate? */
+	if (!common) {
+		common = kzalloc(sizeof *common, GFP_KERNEL);
+		if (!common)
+			return ERR_PTR(-ENOMEM);
+		common->free_storage_on_release = 1;
+	} else {
+		memset(common, 0, sizeof *common);
+		common->free_storage_on_release = 0;
+	}
+
+	common->private_data = cfg->private_data;
+
+	common->gadget = gadget;
+	common->ep0 = gadget->ep0;
+	common->ep0req = cdev->req;
+
+	/* Maybe allocate device-global string IDs, and patch descriptors */
+	if (fsg_strings[FSG_STRING_INTERFACE].id == 0) {
+		rc = usb_string_id(cdev);
+		if (rc < 0) {
+			kfree(common);
+			return ERR_PTR(rc);
+		}
+		fsg_strings[FSG_STRING_INTERFACE].id = rc;
+		fsg_intf_desc.iInterface = rc;
+	}
+
+	/* Create the LUNs, open their backing files, and register the
+	 * LUN devices in sysfs. */
+	curlun = kzalloc(nluns * sizeof *curlun, GFP_KERNEL);
+	if (!curlun) {
+		kfree(common);
+		return ERR_PTR(-ENOMEM);
+	}
+	common->luns = curlun;
+
+	init_rwsem(&common->filesem);
+
+	for (i = 0, lcfg = cfg->luns; i < nluns; ++i, ++curlun, ++lcfg) {
+		curlun->cdrom = !!lcfg->cdrom;
+		curlun->ro = lcfg->cdrom || lcfg->ro;
+		curlun->removable = lcfg->removable;
+		curlun->dev.release = fsg_lun_release;
+		curlun->dev.parent = &gadget->dev;
+		/* curlun->dev.driver = &fsg_driver.driver; XXX */
+		dev_set_drvdata(&curlun->dev, &common->filesem);
+		dev_set_name(&curlun->dev,
+			     cfg->lun_name_format
+			   ? cfg->lun_name_format
+			   : "lun%d",
+			     i);
+
+		rc = device_register(&curlun->dev);
+		if (rc) {
+			INFO(common, "failed to register LUN%d: %d\n", i, rc);
+			common->nluns = i;
+			goto error_release;
+		}
+
+		rc = device_create_file(&curlun->dev, &dev_attr_ro);
+		if (rc)
+			goto error_luns;
+		rc = device_create_file(&curlun->dev, &dev_attr_file);
+		if (rc)
+			goto error_luns;
+
+		if (lcfg->filename) {
+			rc = fsg_lun_open(curlun, lcfg->filename);
+			if (rc)
+				goto error_luns;
+		} else if (!curlun->removable) {
+			ERROR(common, "no file given for LUN%d\n", i);
+			rc = -EINVAL;
+			goto error_luns;
+		}
+	}
+	common->nluns = nluns;
+
+
+	/* Data buffers cyclic list */
+	/* Buffers in buffhds are static -- no need for additional
+	 * allocation. */
+	bh = common->buffhds;
+	i = FSG_NUM_BUFFERS - 1;
+	do {
+		bh->next = bh + 1;
+	} while (++bh, --i);
+	bh->next = common->buffhds;
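+	/* The comma operator in the do-while above advances bh before
+	 * testing i, so the loop links buffers 0..N-2 and the final
+	 * assignment closes the ring. */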
+
+
+	/* Prepare inquiryString */
+	if (cfg->release != 0xffff) {
+		i = cfg->release;
+	} else {
+		/* The sa1100 controller is not supported */
+		i = gadget_is_sa1100(gadget)
+			? -1
+			: usb_gadget_controller_number(gadget);
+		if (i >= 0) {
+			i = 0x0300 + i;
+		} else {
+			WARNING(common, "controller '%s' not recognized\n",
+				gadget->name);
+			i = 0x0399;
+		}
+	}
+#define OR(x, y) ((x) ? (x) : (y))
+	snprintf(common->inquiry_string, sizeof common->inquiry_string,
+		 "%-8s%-16s%04x",
+		 OR(cfg->vendor_name, "Linux   "),
+		 /* Assume product name dependent on the first LUN */
+		 OR(cfg->product_name, common->luns->cdrom
+				     ? "File-CD Gadget  "
+				     : "File-Stor Gadget"),
+		 i);
+
+
+	/* Some peripheral controllers are known not to be able to
+	 * halt bulk endpoints correctly.  If one of them is present,
+	 * disable stalls.
+	 */
+	common->can_stall = cfg->can_stall &&
+		!(gadget_is_sh(common->gadget) ||
+		  gadget_is_at91(common->gadget));
+
+
+	spin_lock_init(&common->lock);
+	kref_init(&common->ref);
+
+
+	/* Tell the thread to start working */
+	common->thread_exits = cfg->thread_exits;
+	common->thread_task =
+		kthread_create(fsg_main_thread, common,
+			       OR(cfg->thread_name, "file-storage"));
+	if (IS_ERR(common->thread_task)) {
+		rc = PTR_ERR(common->thread_task);
+		goto error_release;
+	}
+	init_completion(&common->thread_notifier);
+#undef OR
+
+
+	/* Information */
+	INFO(common, FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
+	INFO(common, "Number of LUNs=%d\n", common->nluns);
+
+	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
+	for (i = 0, nluns = common->nluns, curlun = common->luns;
+	     i < nluns;
+	     ++curlun, ++i) {
+		char *p = "(no medium)";
+		if (fsg_lun_is_open(curlun)) {
+			p = "(error)";
+			if (pathbuf) {
+				p = d_path(&curlun->filp->f_path,
+					   pathbuf, PATH_MAX);
+				if (IS_ERR(p))
+					p = "(error)";
+			}
+		}
+		LINFO(curlun, "LUN: %s%s%sfile: %s\n",
+		      curlun->removable ? "removable " : "",
+		      curlun->ro ? "read only " : "",
+		      curlun->cdrom ? "CD-ROM " : "",
+		      p);
+	}
+	kfree(pathbuf);
+
+	DBG(common, "I/O thread pid: %d\n", task_pid_nr(common->thread_task));
+
+	wake_up_process(common->thread_task);
+
+	return common;
+
+
+error_luns:
+	common->nluns = i + 1;
+error_release:
+	common->state = FSG_STATE_TERMINATED;	/* The thread is dead */
+	/* Call fsg_common_release() directly; the ref might not be
+	 * initialised yet. */
+	fsg_common_release(&common->ref);
+	complete(&common->thread_notifier);
+	return ERR_PTR(rc);
+}
+
+
+static void fsg_common_release(struct kref *ref)
+{
+	struct fsg_common *common =
+		container_of(ref, struct fsg_common, ref);
+	unsigned i = common->nluns;
+	struct fsg_lun *lun = common->luns;
+
+	/* If the thread isn't already dead, tell it to exit now */
+	if (common->state != FSG_STATE_TERMINATED) {
+		raise_exception(common, FSG_STATE_EXIT);
+		wait_for_completion(&common->thread_notifier);
+
+		/* The cleanup routine waits for this completion also */
+		complete(&common->thread_notifier);
+	}
+
+	/* Beware of the tempting for -> do-while optimization: when in
+	 * error recovery nluns may be zero. */
+
+	for (; i; --i, ++lun) {
+		device_remove_file(&lun->dev, &dev_attr_ro);
+		device_remove_file(&lun->dev, &dev_attr_file);
+		fsg_lun_close(lun);
+		device_unregister(&lun->dev);
+	}
+
+	kfree(common->luns);
+	if (common->free_storage_on_release)
+		kfree(common);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+
+static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct fsg_dev		*fsg = fsg_from_func(f);
+
+	DBG(fsg, "unbind\n");
+	fsg_common_put(fsg->common);
+	kfree(fsg);
+}
+
+
+static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct fsg_dev		*fsg = fsg_from_func(f);
+	struct usb_gadget	*gadget = c->cdev->gadget;
+	int			rc;
+	int			i;
+	struct usb_ep		*ep;
+
+	fsg->gadget = gadget;
+
+	/* New interface */
+	i = usb_interface_id(c, f);
+	if (i < 0)
+		return i;
+	fsg_intf_desc.bInterfaceNumber = i;
+	fsg->interface_number = i;
+
+	/* Find all the endpoints we will use */
+	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
+	if (!ep)
+		goto autoconf_fail;
+	ep->driver_data = fsg->common;	/* claim the endpoint */
+	fsg->bulk_in = ep;
+
+	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
+	if (!ep)
+		goto autoconf_fail;
+	ep->driver_data = fsg->common;	/* claim the endpoint */
+	fsg->bulk_out = ep;
+
+	if (gadget_is_dualspeed(gadget)) {
+		/* Assume endpoint addresses are the same for both speeds */
+		fsg_hs_bulk_in_desc.bEndpointAddress =
+			fsg_fs_bulk_in_desc.bEndpointAddress;
+		fsg_hs_bulk_out_desc.bEndpointAddress =
+			fsg_fs_bulk_out_desc.bEndpointAddress;
+		f->hs_descriptors = fsg_hs_function;
+	}
+
+	return 0;
+
+autoconf_fail:
+	ERROR(fsg, "unable to autoconfigure all endpoints\n");
+	rc = -ENOTSUPP;
+	fsg_unbind(c, f);
+	return rc;
+}
+
+
+/****************************** ADD FUNCTION ******************************/
+
+static struct usb_gadget_strings *fsg_strings_array[] = {
+	&fsg_stringtab,
+	NULL,
+};
+
+static int fsg_add(struct usb_composite_dev *cdev,
+		   struct usb_configuration *c,
+		   struct fsg_common *common)
+{
+	struct fsg_dev *fsg;
+	int rc;
+
+	fsg = kzalloc(sizeof *fsg, GFP_KERNEL);
+	if (unlikely(!fsg))
+		return -ENOMEM;
+
+	fsg->function.name        = FSG_DRIVER_DESC;
+	fsg->function.strings     = fsg_strings_array;
+	fsg->function.descriptors = fsg_fs_function;
+	fsg->function.bind        = fsg_bind;
+	fsg->function.unbind      = fsg_unbind;
+	fsg->function.setup       = fsg_setup;
+	fsg->function.set_alt     = fsg_set_alt;
+	fsg->function.disable     = fsg_disable;
+
+	fsg->common               = common;
+	/* Our caller holds a reference to the common structure so we
+	 * don't have to worry about it being freed until we return
+	 * from this function.  So instead of incrementing the counter
+	 * now and decrementing it in error recovery, we increment it
+	 * only when the call to usb_add_function() succeeds. */
+
+	rc = usb_add_function(c, &fsg->function);
+
+	if (likely(rc == 0))
+		fsg_common_get(fsg->common);
+	else
+		kfree(fsg);
+
+	return rc;
+}
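
For orientation, a minimal sketch of how a composite gadget's configuration
callback is expected to drive fsg_add() under the ownership rule in the
comment above: the caller keeps its own reference for the duration of the
call and may drop it unconditionally afterwards, since fsg_add() takes an
extra reference only on success.  The callback name and config variable are
hypothetical; compare msg_do_config() in mass_storage.c further down.

static int example_do_config(struct usb_configuration *c)
{
	struct fsg_common *common;
	int ret;

	/* Build a common structure from some fsg_config (hypothetical). */
	common = fsg_common_init(NULL, c->cdev, &example_cfg);
	if (IS_ERR(common))
		return PTR_ERR(common);

	/* fsg_add() gets its own reference on success, so ours can be
	 * dropped whether or not the call succeeded. */
	ret = fsg_add(c->cdev, c, common);
	fsg_common_put(common);
	return ret;
}
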
+
+
+
+/************************* Module parameters *************************/
+
+
+struct fsg_module_parameters {
+	char		*file[FSG_MAX_LUNS];
+	int		ro[FSG_MAX_LUNS];
+	int		removable[FSG_MAX_LUNS];
+	int		cdrom[FSG_MAX_LUNS];
+
+	unsigned int	file_count, ro_count, removable_count, cdrom_count;
+	unsigned int	luns;	/* nluns */
+	int		stall;	/* can_stall */
+};
+
+
+#define _FSG_MODULE_PARAM_ARRAY(prefix, params, name, type, desc)	\
+	module_param_array_named(prefix ## name, params.name, type,	\
+				 &prefix ## params.name ## _count,	\
+				 S_IRUGO);				\
+	MODULE_PARM_DESC(prefix ## name, desc)
+
+#define _FSG_MODULE_PARAM(prefix, params, name, type, desc)		\
+	module_param_named(prefix ## name, params.name, type,		\
+			   S_IRUGO);					\
+	MODULE_PARM_DESC(prefix ## name, desc)
+
+#define FSG_MODULE_PARAMETERS(prefix, params)				\
+	_FSG_MODULE_PARAM_ARRAY(prefix, params, file, charp,		\
+				"names of backing files or devices");	\
+	_FSG_MODULE_PARAM_ARRAY(prefix, params, ro, bool,		\
+				"true to force read-only");		\
+	_FSG_MODULE_PARAM_ARRAY(prefix, params, removable, bool,	\
+				"true to simulate removable media");	\
+	_FSG_MODULE_PARAM_ARRAY(prefix, params, cdrom, bool,		\
+				"true to simulate CD-ROM instead of disk"); \
+	_FSG_MODULE_PARAM(prefix, params, luns, uint,			\
+			  "number of LUNs");				\
+	_FSG_MODULE_PARAM(prefix, params, stall, bool,			\
+			  "false to prevent bulk stalls")
+
+
+static void
+fsg_config_from_params(struct fsg_config *cfg,
+		       const struct fsg_module_parameters *params)
+{
+	struct fsg_lun_config *lun;
+	unsigned i;
+
+	/* Configure LUNs */
+	cfg->nluns =
+		min(params->luns ?: (params->file_count ?: 1u),
+		    (unsigned)FSG_MAX_LUNS);
+	for (i = 0, lun = cfg->luns; i < cfg->nluns; ++i, ++lun) {
+		lun->ro = !!params->ro[i];
+		lun->cdrom = !!params->cdrom[i];
+		lun->removable = /* Removable by default */
+			params->removable_count <= i || params->removable[i];
+		lun->filename =
+			params->file_count > i && params->file[i][0]
+			? params->file[i]
+			: 0;
+	}
+
+	/* Let MSF use defaults */
+	cfg->lun_name_format = 0;
+	cfg->thread_name = 0;
+	cfg->vendor_name = 0;
+	cfg->product_name = 0;
+	cfg->release = 0xffff;
+
+	cfg->thread_exits = 0;
+	cfg->private_data = 0;
+
+	/* Finalise */
+	cfg->can_stall = params->stall;
+}
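
The nluns computation above leans on GCC's binary "?:" extension; spelled
out, the defaulting rule is: take the luns= parameter if given, otherwise
the number of file= arguments, otherwise one, and cap the result at
FSG_MAX_LUNS.  An equivalent long-hand sketch:

	unsigned nluns = params->luns;
	if (!nluns)
		nluns = params->file_count ? params->file_count : 1u;
	cfg->nluns = min(nluns, (unsigned)FSG_MAX_LUNS);
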
+
+static inline struct fsg_common *
+fsg_common_from_params(struct fsg_common *common,
+		       struct usb_composite_dev *cdev,
+		       const struct fsg_module_parameters *params)
+	__attribute__((unused));
+static inline struct fsg_common *
+fsg_common_from_params(struct fsg_common *common,
+		       struct usb_composite_dev *cdev,
+		       const struct fsg_module_parameters *params)
+{
+	struct fsg_config cfg;
+	fsg_config_from_params(&cfg, params);
+	return fsg_common_init(common, cdev, &cfg);
+}
+
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index c9966cc..95dae4c 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -4,6 +4,8 @@
  * Copyright (C) 2003-2005,2008 David Brownell
  * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
  * Copyright (C) 2008 Nokia Corporation
+ * Copyright (C) 2009 Samsung Electronics
+ *                    Author: Michal Nazarewicz (m.nazarewicz@samsung.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -149,8 +151,8 @@
 	.bDataInterface =	0x01,
 };
 
-static struct usb_cdc_acm_descriptor acm_descriptor __initdata = {
-	.bLength =		sizeof acm_descriptor,
+static struct usb_cdc_acm_descriptor rndis_acm_descriptor __initdata = {
+	.bLength =		sizeof rndis_acm_descriptor,
 	.bDescriptorType =	USB_DT_CS_INTERFACE,
 	.bDescriptorSubType =	USB_CDC_ACM_TYPE,
 
@@ -179,6 +181,20 @@
 	/* .iInterface = DYNAMIC */
 };
 
+
+static struct usb_interface_assoc_descriptor
+rndis_iad_descriptor = {
+	.bLength =		sizeof rndis_iad_descriptor,
+	.bDescriptorType =	USB_DT_INTERFACE_ASSOCIATION,
+
+	.bFirstInterface =	0, /* XXX, hardcoded */
+	.bInterfaceCount =	2,	/* control + data */
+	.bFunctionClass =	USB_CLASS_COMM,
+	.bFunctionSubClass =	USB_CDC_SUBCLASS_ETHERNET,
+	.bFunctionProtocol =	USB_CDC_PROTO_NONE,
+	/* .iFunction = DYNAMIC */
+};
+
 /* full speed support: */
 
 static struct usb_endpoint_descriptor fs_notify_desc __initdata = {
@@ -208,11 +224,12 @@
 };
 
 static struct usb_descriptor_header *eth_fs_function[] __initdata = {
+	(struct usb_descriptor_header *) &rndis_iad_descriptor,
 	/* control interface matches ACM, not Ethernet */
 	(struct usb_descriptor_header *) &rndis_control_intf,
 	(struct usb_descriptor_header *) &header_desc,
 	(struct usb_descriptor_header *) &call_mgmt_descriptor,
-	(struct usb_descriptor_header *) &acm_descriptor,
+	(struct usb_descriptor_header *) &rndis_acm_descriptor,
 	(struct usb_descriptor_header *) &rndis_union_desc,
 	(struct usb_descriptor_header *) &fs_notify_desc,
 	/* data interface has no altsetting */
@@ -252,11 +269,12 @@
 };
 
 static struct usb_descriptor_header *eth_hs_function[] __initdata = {
+	(struct usb_descriptor_header *) &rndis_iad_descriptor,
 	/* control interface matches ACM, not Ethernet */
 	(struct usb_descriptor_header *) &rndis_control_intf,
 	(struct usb_descriptor_header *) &header_desc,
 	(struct usb_descriptor_header *) &call_mgmt_descriptor,
-	(struct usb_descriptor_header *) &acm_descriptor,
+	(struct usb_descriptor_header *) &rndis_acm_descriptor,
 	(struct usb_descriptor_header *) &rndis_union_desc,
 	(struct usb_descriptor_header *) &hs_notify_desc,
 	/* data interface has no altsetting */
@@ -271,6 +289,7 @@
 static struct usb_string rndis_string_defs[] = {
 	[0].s = "RNDIS Communications Control",
 	[1].s = "RNDIS Ethernet Data",
+	[2].s = "RNDIS",
 	{  } /* end of list */
 };
 
@@ -587,6 +606,7 @@
 	if (status < 0)
 		goto fail;
 	rndis->ctrl_id = status;
+	rndis_iad_descriptor.bFirstInterface = status;
 
 	rndis_control_intf.bInterfaceNumber = status;
 	rndis_union_desc.bMasterInterface0 = status;
@@ -798,6 +818,13 @@
 			return status;
 		rndis_string_defs[1].id = status;
 		rndis_data_intf.iInterface = status;
+
+		/* IAD iFunction label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		rndis_string_defs[2].id = status;
+		rndis_iad_descriptor.iFunction = status;
 	}
 
 	/* allocate and initialize one new instance */
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 1e6aa50..fca3407 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -248,8 +248,6 @@
 #include <linux/freezer.h>
 #include <linux/utsname.h>
 
-#include <asm/unaligned.h>
-
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
 
@@ -274,21 +272,20 @@
 #define DRIVER_NAME		"g_file_storage"
 #define DRIVER_VERSION		"20 November 2008"
 
-static const char longname[] = DRIVER_DESC;
-static const char shortname[] = DRIVER_NAME;
+static       char fsg_string_manufacturer[64];
+static const char fsg_string_product[] = DRIVER_DESC;
+static       char fsg_string_serial[13];
+static const char fsg_string_config[] = "Self-powered";
+static const char fsg_string_interface[] = "Mass Storage";
+
+
+#include "storage_common.c"
+
 
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_AUTHOR("Alan Stern");
 MODULE_LICENSE("Dual BSD/GPL");
 
-/* Thanks to NetChip Technologies for donating this product ID.
- *
- * DO NOT REUSE THESE IDs with any other driver!!  Ever!!
- * Instead:  allocate your own, using normal USB-IF procedures. */
-#define DRIVER_VENDOR_ID	0x0525	// NetChip
-#define DRIVER_PRODUCT_ID	0xa4a5	// Linux-USB File-backed Storage Gadget
-
-
 /*
  * This driver assumes self-powered hardware and has no way for users to
  * trigger remote wakeup.  It uses autoconfiguration to select endpoints
@@ -298,54 +295,12 @@
 
 /*-------------------------------------------------------------------------*/
 
-#define LDBG(lun,fmt,args...) \
-	dev_dbg(&(lun)->dev , fmt , ## args)
-#define MDBG(fmt,args...) \
-	pr_debug(DRIVER_NAME ": " fmt , ## args)
-
-#ifndef DEBUG
-#undef VERBOSE_DEBUG
-#undef DUMP_MSGS
-#endif /* !DEBUG */
-
-#ifdef VERBOSE_DEBUG
-#define VLDBG	LDBG
-#else
-#define VLDBG(lun,fmt,args...) \
-	do { } while (0)
-#endif /* VERBOSE_DEBUG */
-
-#define LERROR(lun,fmt,args...) \
-	dev_err(&(lun)->dev , fmt , ## args)
-#define LWARN(lun,fmt,args...) \
-	dev_warn(&(lun)->dev , fmt , ## args)
-#define LINFO(lun,fmt,args...) \
-	dev_info(&(lun)->dev , fmt , ## args)
-
-#define MINFO(fmt,args...) \
-	pr_info(DRIVER_NAME ": " fmt , ## args)
-
-#define DBG(d, fmt, args...) \
-	dev_dbg(&(d)->gadget->dev , fmt , ## args)
-#define VDBG(d, fmt, args...) \
-	dev_vdbg(&(d)->gadget->dev , fmt , ## args)
-#define ERROR(d, fmt, args...) \
-	dev_err(&(d)->gadget->dev , fmt , ## args)
-#define WARNING(d, fmt, args...) \
-	dev_warn(&(d)->gadget->dev , fmt , ## args)
-#define INFO(d, fmt, args...) \
-	dev_info(&(d)->gadget->dev , fmt , ## args)
-
-
-/*-------------------------------------------------------------------------*/
 
 /* Encapsulate the module parameter settings */
 
-#define MAX_LUNS	8
-
 static struct {
-	char		*file[MAX_LUNS];
-	int		ro[MAX_LUNS];
+	char		*file[FSG_MAX_LUNS];
+	int		ro[FSG_MAX_LUNS];
 	unsigned int	num_filenames;
 	unsigned int	num_ros;
 	unsigned int	nluns;
@@ -372,8 +327,8 @@
 	.removable		= 0,
 	.can_stall		= 1,
 	.cdrom			= 0,
-	.vendor			= DRIVER_VENDOR_ID,
-	.product		= DRIVER_PRODUCT_ID,
+	.vendor			= FSG_VENDOR_ID,
+	.product		= FSG_PRODUCT_ID,
 	.release		= 0xffff,	// Use controller chip type
 	.buflen			= 16384,
 	};
@@ -425,125 +380,6 @@
 #endif /* CONFIG_USB_FILE_STORAGE_TEST */
 
 
-/*-------------------------------------------------------------------------*/
-
-/* SCSI device types */
-#define TYPE_DISK	0x00
-#define TYPE_CDROM	0x05
-
-/* USB protocol value = the transport method */
-#define USB_PR_CBI	0x00		// Control/Bulk/Interrupt
-#define USB_PR_CB	0x01		// Control/Bulk w/o interrupt
-#define USB_PR_BULK	0x50		// Bulk-only
-
-/* USB subclass value = the protocol encapsulation */
-#define USB_SC_RBC	0x01		// Reduced Block Commands (flash)
-#define USB_SC_8020	0x02		// SFF-8020i, MMC-2, ATAPI (CD-ROM)
-#define USB_SC_QIC	0x03		// QIC-157 (tape)
-#define USB_SC_UFI	0x04		// UFI (floppy)
-#define USB_SC_8070	0x05		// SFF-8070i (removable)
-#define USB_SC_SCSI	0x06		// Transparent SCSI
-
-/* Bulk-only data structures */
-
-/* Command Block Wrapper */
-struct bulk_cb_wrap {
-	__le32	Signature;		// Contains 'USBC'
-	u32	Tag;			// Unique per command id
-	__le32	DataTransferLength;	// Size of the data
-	u8	Flags;			// Direction in bit 7
-	u8	Lun;			// LUN (normally 0)
-	u8	Length;			// Of the CDB, <= MAX_COMMAND_SIZE
-	u8	CDB[16];		// Command Data Block
-};
-
-#define USB_BULK_CB_WRAP_LEN	31
-#define USB_BULK_CB_SIG		0x43425355	// Spells out USBC
-#define USB_BULK_IN_FLAG	0x80
-
-/* Command Status Wrapper */
-struct bulk_cs_wrap {
-	__le32	Signature;		// Should = 'USBS'
-	u32	Tag;			// Same as original command
-	__le32	Residue;		// Amount not transferred
-	u8	Status;			// See below
-};
-
-#define USB_BULK_CS_WRAP_LEN	13
-#define USB_BULK_CS_SIG		0x53425355	// Spells out 'USBS'
-#define USB_STATUS_PASS		0
-#define USB_STATUS_FAIL		1
-#define USB_STATUS_PHASE_ERROR	2
-
-/* Bulk-only class specific requests */
-#define USB_BULK_RESET_REQUEST		0xff
-#define USB_BULK_GET_MAX_LUN_REQUEST	0xfe
-
-
-/* CBI Interrupt data structure */
-struct interrupt_data {
-	u8	bType;
-	u8	bValue;
-};
-
-#define CBI_INTERRUPT_DATA_LEN		2
-
-/* CBI Accept Device-Specific Command request */
-#define USB_CBI_ADSC_REQUEST		0x00
-
-
-#define MAX_COMMAND_SIZE	16	// Length of a SCSI Command Data Block
-
-/* SCSI commands that we recognize */
-#define SC_FORMAT_UNIT			0x04
-#define SC_INQUIRY			0x12
-#define SC_MODE_SELECT_6		0x15
-#define SC_MODE_SELECT_10		0x55
-#define SC_MODE_SENSE_6			0x1a
-#define SC_MODE_SENSE_10		0x5a
-#define SC_PREVENT_ALLOW_MEDIUM_REMOVAL	0x1e
-#define SC_READ_6			0x08
-#define SC_READ_10			0x28
-#define SC_READ_12			0xa8
-#define SC_READ_CAPACITY		0x25
-#define SC_READ_FORMAT_CAPACITIES	0x23
-#define SC_READ_HEADER			0x44
-#define SC_READ_TOC			0x43
-#define SC_RELEASE			0x17
-#define SC_REQUEST_SENSE		0x03
-#define SC_RESERVE			0x16
-#define SC_SEND_DIAGNOSTIC		0x1d
-#define SC_START_STOP_UNIT		0x1b
-#define SC_SYNCHRONIZE_CACHE		0x35
-#define SC_TEST_UNIT_READY		0x00
-#define SC_VERIFY			0x2f
-#define SC_WRITE_6			0x0a
-#define SC_WRITE_10			0x2a
-#define SC_WRITE_12			0xaa
-
-/* SCSI Sense Key/Additional Sense Code/ASC Qualifier values */
-#define SS_NO_SENSE				0
-#define SS_COMMUNICATION_FAILURE		0x040800
-#define SS_INVALID_COMMAND			0x052000
-#define SS_INVALID_FIELD_IN_CDB			0x052400
-#define SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE	0x052100
-#define SS_LOGICAL_UNIT_NOT_SUPPORTED		0x052500
-#define SS_MEDIUM_NOT_PRESENT			0x023a00
-#define SS_MEDIUM_REMOVAL_PREVENTED		0x055302
-#define SS_NOT_READY_TO_READY_TRANSITION	0x062800
-#define SS_RESET_OCCURRED			0x062900
-#define SS_SAVING_PARAMETERS_NOT_SUPPORTED	0x053900
-#define SS_UNRECOVERED_READ_ERROR		0x031100
-#define SS_WRITE_ERROR				0x030c02
-#define SS_WRITE_PROTECTED			0x072700
-
-#define SK(x)		((u8) ((x) >> 16))	// Sense Key byte, etc.
-#define ASC(x)		((u8) ((x) >> 8))
-#define ASCQ(x)		((u8) (x))
-
-
-/*-------------------------------------------------------------------------*/
-
 /*
  * These definitions will permit the compiler to avoid generating code for
  * parts of the driver that aren't used in the non-TEST version.  Even gcc
@@ -566,81 +402,8 @@
 #endif /* CONFIG_USB_FILE_STORAGE_TEST */
 
 
-struct lun {
-	struct file	*filp;
-	loff_t		file_length;
-	loff_t		num_sectors;
+/*-------------------------------------------------------------------------*/
 
-	unsigned int	ro : 1;
-	unsigned int	prevent_medium_removal : 1;
-	unsigned int	registered : 1;
-	unsigned int	info_valid : 1;
-
-	u32		sense_data;
-	u32		sense_data_info;
-	u32		unit_attention_data;
-
-	struct device	dev;
-};
-
-#define backing_file_is_open(curlun)	((curlun)->filp != NULL)
-
-static struct lun *dev_to_lun(struct device *dev)
-{
-	return container_of(dev, struct lun, dev);
-}
-
-
-/* Big enough to hold our biggest descriptor */
-#define EP0_BUFSIZE	256
-#define DELAYED_STATUS	(EP0_BUFSIZE + 999)	// An impossibly large value
-
-/* Number of buffers we will use.  2 is enough for double-buffering */
-#define NUM_BUFFERS	2
-
-enum fsg_buffer_state {
-	BUF_STATE_EMPTY = 0,
-	BUF_STATE_FULL,
-	BUF_STATE_BUSY
-};
-
-struct fsg_buffhd {
-	void				*buf;
-	enum fsg_buffer_state		state;
-	struct fsg_buffhd		*next;
-
-	/* The NetChip 2280 is faster, and handles some protocol faults
-	 * better, if we don't submit any short bulk-out read requests.
-	 * So we will record the intended request length here. */
-	unsigned int			bulk_out_intended_length;
-
-	struct usb_request		*inreq;
-	int				inreq_busy;
-	struct usb_request		*outreq;
-	int				outreq_busy;
-};
-
-enum fsg_state {
-	FSG_STATE_COMMAND_PHASE = -10,		// This one isn't used anywhere
-	FSG_STATE_DATA_PHASE,
-	FSG_STATE_STATUS_PHASE,
-
-	FSG_STATE_IDLE = 0,
-	FSG_STATE_ABORT_BULK_OUT,
-	FSG_STATE_RESET,
-	FSG_STATE_INTERFACE_CHANGE,
-	FSG_STATE_CONFIG_CHANGE,
-	FSG_STATE_DISCONNECT,
-	FSG_STATE_EXIT,
-	FSG_STATE_TERMINATED
-};
-
-enum data_direction {
-	DATA_DIR_UNKNOWN = 0,
-	DATA_DIR_FROM_HOST,
-	DATA_DIR_TO_HOST,
-	DATA_DIR_NONE
-};
 
 struct fsg_dev {
 	/* lock protects: state, all the req_busy's, and cbbuf_cmnd */
@@ -662,7 +425,7 @@
 	int			intreq_busy;
 	struct fsg_buffhd	*intr_buffhd;
 
- 	unsigned int		bulk_out_maxpacket;
+	unsigned int		bulk_out_maxpacket;
 	enum fsg_state		state;		// For exception handling
 	unsigned int		exception_req_tag;
 
@@ -687,7 +450,7 @@
 
 	struct fsg_buffhd	*next_buffhd_to_fill;
 	struct fsg_buffhd	*next_buffhd_to_drain;
-	struct fsg_buffhd	buffhds[NUM_BUFFERS];
+	struct fsg_buffhd	buffhds[FSG_NUM_BUFFERS];
 
 	int			thread_wakeup_needed;
 	struct completion	thread_notifier;
@@ -712,8 +475,8 @@
 	u8			cbbuf_cmnd[MAX_COMMAND_SIZE];
 
 	unsigned int		nluns;
-	struct lun		*luns;
-	struct lun		*curlun;
+	struct fsg_lun		*luns;
+	struct fsg_lun		*curlun;
 };
 
 typedef void (*fsg_routine_t)(struct fsg_dev *);
@@ -739,49 +502,9 @@
 static struct fsg_dev			*the_fsg;
 static struct usb_gadget_driver		fsg_driver;
 
-static void	close_backing_file(struct lun *curlun);
-
 
 /*-------------------------------------------------------------------------*/
 
-#ifdef DUMP_MSGS
-
-static void dump_msg(struct fsg_dev *fsg, const char *label,
-		const u8 *buf, unsigned int length)
-{
-	if (length < 512) {
-		DBG(fsg, "%s, length %u:\n", label, length);
-		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
-				16, 1, buf, length, 0);
-	}
-}
-
-static void dump_cdb(struct fsg_dev *fsg)
-{}
-
-#else
-
-static void dump_msg(struct fsg_dev *fsg, const char *label,
-		const u8 *buf, unsigned int length)
-{}
-
-#ifdef VERBOSE_DEBUG
-
-static void dump_cdb(struct fsg_dev *fsg)
-{
-	print_hex_dump(KERN_DEBUG, "SCSI CDB: ", DUMP_PREFIX_NONE,
-			16, 1, fsg->cmnd, fsg->cmnd_size, 0);
-}
-
-#else
-
-static void dump_cdb(struct fsg_dev *fsg)
-{}
-
-#endif /* VERBOSE_DEBUG */
-#endif /* DUMP_MSGS */
-
-
 static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
 {
 	const char	*name;
@@ -799,26 +522,11 @@
 
 /*-------------------------------------------------------------------------*/
 
-/* Routines for unaligned data access */
-
-static u32 get_unaligned_be24(u8 *buf)
-{
-	return 0xffffff & (u32) get_unaligned_be32(buf - 1);
-}
-
-
-/*-------------------------------------------------------------------------*/
-
 /*
  * DESCRIPTORS ... most are static, but strings and (full) configuration
  * descriptors are built on demand.  Also the (static) config and interface
  * descriptors are adjusted during fsg_bind().
  */
-#define STRING_MANUFACTURER	1
-#define STRING_PRODUCT		2
-#define STRING_SERIAL		3
-#define STRING_CONFIG		4
-#define STRING_INTERFACE	5
 
 /* There is only one configuration. */
 #define	CONFIG_VALUE		1
@@ -832,13 +540,13 @@
 	.bDeviceClass =		USB_CLASS_PER_INTERFACE,
 
 	/* The next three values can be overridden by module parameters */
-	.idVendor =		cpu_to_le16(DRIVER_VENDOR_ID),
-	.idProduct =		cpu_to_le16(DRIVER_PRODUCT_ID),
+	.idVendor =		cpu_to_le16(FSG_VENDOR_ID),
+	.idProduct =		cpu_to_le16(FSG_PRODUCT_ID),
 	.bcdDevice =		cpu_to_le16(0xffff),
 
-	.iManufacturer =	STRING_MANUFACTURER,
-	.iProduct =		STRING_PRODUCT,
-	.iSerialNumber =	STRING_SERIAL,
+	.iManufacturer =	FSG_STRING_MANUFACTURER,
+	.iProduct =		FSG_STRING_PRODUCT,
+	.iSerialNumber =	FSG_STRING_SERIAL,
 	.bNumConfigurations =	1,
 };
 
@@ -850,86 +558,12 @@
 	/* wTotalLength computed by usb_gadget_config_buf() */
 	.bNumInterfaces =	1,
 	.bConfigurationValue =	CONFIG_VALUE,
-	.iConfiguration =	STRING_CONFIG,
+	.iConfiguration =	FSG_STRING_CONFIG,
 	.bmAttributes =		USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
 	.bMaxPower =		CONFIG_USB_GADGET_VBUS_DRAW / 2,
 };
 
-static struct usb_otg_descriptor
-otg_desc = {
-	.bLength =		sizeof(otg_desc),
-	.bDescriptorType =	USB_DT_OTG,
 
-	.bmAttributes =		USB_OTG_SRP,
-};
-
-/* There is only one interface. */
-
-static struct usb_interface_descriptor
-intf_desc = {
-	.bLength =		sizeof intf_desc,
-	.bDescriptorType =	USB_DT_INTERFACE,
-
-	.bNumEndpoints =	2,		// Adjusted during fsg_bind()
-	.bInterfaceClass =	USB_CLASS_MASS_STORAGE,
-	.bInterfaceSubClass =	USB_SC_SCSI,	// Adjusted during fsg_bind()
-	.bInterfaceProtocol =	USB_PR_BULK,	// Adjusted during fsg_bind()
-	.iInterface =		STRING_INTERFACE,
-};
-
-/* Three full-speed endpoint descriptors: bulk-in, bulk-out,
- * and interrupt-in. */
-
-static struct usb_endpoint_descriptor
-fs_bulk_in_desc = {
-	.bLength =		USB_DT_ENDPOINT_SIZE,
-	.bDescriptorType =	USB_DT_ENDPOINT,
-
-	.bEndpointAddress =	USB_DIR_IN,
-	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
-	/* wMaxPacketSize set by autoconfiguration */
-};
-
-static struct usb_endpoint_descriptor
-fs_bulk_out_desc = {
-	.bLength =		USB_DT_ENDPOINT_SIZE,
-	.bDescriptorType =	USB_DT_ENDPOINT,
-
-	.bEndpointAddress =	USB_DIR_OUT,
-	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
-	/* wMaxPacketSize set by autoconfiguration */
-};
-
-static struct usb_endpoint_descriptor
-fs_intr_in_desc = {
-	.bLength =		USB_DT_ENDPOINT_SIZE,
-	.bDescriptorType =	USB_DT_ENDPOINT,
-
-	.bEndpointAddress =	USB_DIR_IN,
-	.bmAttributes =		USB_ENDPOINT_XFER_INT,
-	.wMaxPacketSize =	cpu_to_le16(2),
-	.bInterval =		32,	// frames -> 32 ms
-};
-
-static const struct usb_descriptor_header *fs_function[] = {
-	(struct usb_descriptor_header *) &otg_desc,
-	(struct usb_descriptor_header *) &intf_desc,
-	(struct usb_descriptor_header *) &fs_bulk_in_desc,
-	(struct usb_descriptor_header *) &fs_bulk_out_desc,
-	(struct usb_descriptor_header *) &fs_intr_in_desc,
-	NULL,
-};
-#define FS_FUNCTION_PRE_EP_ENTRIES	2
-
-
-/*
- * USB 2.0 devices need to expose both high speed and full speed
- * descriptors, unless they only run at full speed.
- *
- * That means alternate endpoint descriptors (bigger packets)
- * and a "device qualifier" ... plus more construction options
- * for the config descriptor.
- */
 static struct usb_qualifier_descriptor
 dev_qualifier = {
 	.bLength =		sizeof dev_qualifier,
@@ -941,78 +575,6 @@
 	.bNumConfigurations =	1,
 };
 
-static struct usb_endpoint_descriptor
-hs_bulk_in_desc = {
-	.bLength =		USB_DT_ENDPOINT_SIZE,
-	.bDescriptorType =	USB_DT_ENDPOINT,
-
-	/* bEndpointAddress copied from fs_bulk_in_desc during fsg_bind() */
-	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
-	.wMaxPacketSize =	cpu_to_le16(512),
-};
-
-static struct usb_endpoint_descriptor
-hs_bulk_out_desc = {
-	.bLength =		USB_DT_ENDPOINT_SIZE,
-	.bDescriptorType =	USB_DT_ENDPOINT,
-
-	/* bEndpointAddress copied from fs_bulk_out_desc during fsg_bind() */
-	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
-	.wMaxPacketSize =	cpu_to_le16(512),
-	.bInterval =		1,	// NAK every 1 uframe
-};
-
-static struct usb_endpoint_descriptor
-hs_intr_in_desc = {
-	.bLength =		USB_DT_ENDPOINT_SIZE,
-	.bDescriptorType =	USB_DT_ENDPOINT,
-
-	/* bEndpointAddress copied from fs_intr_in_desc during fsg_bind() */
-	.bmAttributes =		USB_ENDPOINT_XFER_INT,
-	.wMaxPacketSize =	cpu_to_le16(2),
-	.bInterval =		9,	// 2**(9-1) = 256 uframes -> 32 ms
-};
-
-static const struct usb_descriptor_header *hs_function[] = {
-	(struct usb_descriptor_header *) &otg_desc,
-	(struct usb_descriptor_header *) &intf_desc,
-	(struct usb_descriptor_header *) &hs_bulk_in_desc,
-	(struct usb_descriptor_header *) &hs_bulk_out_desc,
-	(struct usb_descriptor_header *) &hs_intr_in_desc,
-	NULL,
-};
-#define HS_FUNCTION_PRE_EP_ENTRIES	2
-
-/* Maxpacket and other transfer characteristics vary by speed. */
-static struct usb_endpoint_descriptor *
-ep_desc(struct usb_gadget *g, struct usb_endpoint_descriptor *fs,
-		struct usb_endpoint_descriptor *hs)
-{
-	if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
-		return hs;
-	return fs;
-}
-
-
-/* The CBI specification limits the serial string to 12 uppercase hexadecimal
- * characters. */
-static char				manufacturer[64];
-static char				serial[13];
-
-/* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */
-static struct usb_string		strings[] = {
-	{STRING_MANUFACTURER,	manufacturer},
-	{STRING_PRODUCT,	longname},
-	{STRING_SERIAL,		serial},
-	{STRING_CONFIG,		"Self-powered"},
-	{STRING_INTERFACE,	"Mass Storage"},
-	{}
-};
-
-static struct usb_gadget_strings	stringtab = {
-	.language	= 0x0409,		// en-us
-	.strings	= strings,
-};
 
 
 /*
@@ -1032,10 +594,9 @@
 
 	if (gadget_is_dualspeed(gadget) && type == USB_DT_OTHER_SPEED_CONFIG)
 		speed = (USB_SPEED_FULL + USB_SPEED_HIGH) - speed;
-	if (gadget_is_dualspeed(gadget) && speed == USB_SPEED_HIGH)
-		function = hs_function;
-	else
-		function = fs_function;
+	function = gadget_is_dualspeed(gadget) && speed == USB_SPEED_HIGH
+		? (const struct usb_descriptor_header **)fsg_hs_function
+		: (const struct usb_descriptor_header **)fsg_fs_function;
 
 	/* for now, don't advertise srp-only devices */
 	if (!gadget_is_otg(gadget))
@@ -1386,7 +947,7 @@
 			VDBG(fsg, "get string descriptor\n");
 
 			/* wIndex == language code */
-			value = usb_gadget_get_string(&stringtab,
+			value = usb_gadget_get_string(&fsg_stringtab,
 					w_value & 0xff, req->buf);
 			break;
 		}
@@ -1551,7 +1112,7 @@
 
 static int do_read(struct fsg_dev *fsg)
 {
-	struct lun		*curlun = fsg->curlun;
+	struct fsg_lun		*curlun = fsg->curlun;
 	u32			lba;
 	struct fsg_buffhd	*bh;
 	int			rc;
@@ -1677,7 +1238,7 @@
 
 static int do_write(struct fsg_dev *fsg)
 {
-	struct lun		*curlun = fsg->curlun;
+	struct fsg_lun		*curlun = fsg->curlun;
 	u32			lba;
 	struct fsg_buffhd	*bh;
 	int			get_some_more;
@@ -1864,33 +1425,14 @@
 
 /*-------------------------------------------------------------------------*/
 
-/* Sync the file data, don't bother with the metadata.
- * This code was copied from fs/buffer.c:sys_fdatasync(). */
-static int fsync_sub(struct lun *curlun)
-{
-	struct file	*filp = curlun->filp;
-
-	if (curlun->ro || !filp)
-		return 0;
-	return vfs_fsync(filp, filp->f_path.dentry, 1);
-}
-
-static void fsync_all(struct fsg_dev *fsg)
-{
-	int	i;
-
-	for (i = 0; i < fsg->nluns; ++i)
-		fsync_sub(&fsg->luns[i]);
-}
-
 static int do_synchronize_cache(struct fsg_dev *fsg)
 {
-	struct lun	*curlun = fsg->curlun;
+	struct fsg_lun	*curlun = fsg->curlun;
 	int		rc;
 
 	/* We ignore the requested LBA and write out all file's
 	 * dirty data buffers. */
-	rc = fsync_sub(curlun);
+	rc = fsg_lun_fsync_sub(curlun);
 	if (rc)
 		curlun->sense_data = SS_WRITE_ERROR;
 	return 0;
@@ -1899,7 +1441,7 @@
 
 /*-------------------------------------------------------------------------*/
 
-static void invalidate_sub(struct lun *curlun)
+static void invalidate_sub(struct fsg_lun *curlun)
 {
 	struct file	*filp = curlun->filp;
 	struct inode	*inode = filp->f_path.dentry->d_inode;
@@ -1911,7 +1453,7 @@
 
 static int do_verify(struct fsg_dev *fsg)
 {
-	struct lun		*curlun = fsg->curlun;
+	struct fsg_lun		*curlun = fsg->curlun;
 	u32			lba;
 	u32			verification_length;
 	struct fsg_buffhd	*bh = fsg->next_buffhd_to_fill;
@@ -1944,7 +1486,7 @@
 	file_offset = ((loff_t) lba) << 9;
 
 	/* Write out all the dirty buffers before invalidating them */
-	fsync_sub(curlun);
+	fsg_lun_fsync_sub(curlun);
 	if (signal_pending(current))
 		return -EINTR;
 
@@ -2041,7 +1583,7 @@
 
 static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
 {
-	struct lun	*curlun = fsg->curlun;
+	struct fsg_lun	*curlun = fsg->curlun;
 	u8		*buf = (u8 *) bh->buf;
 	u32		sd, sdinfo;
 	int		valid;
@@ -2095,7 +1637,7 @@
 
 static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
 {
-	struct lun	*curlun = fsg->curlun;
+	struct fsg_lun	*curlun = fsg->curlun;
 	u32		lba = get_unaligned_be32(&fsg->cmnd[2]);
 	int		pmi = fsg->cmnd[8];
 	u8		*buf = (u8 *) bh->buf;
@@ -2113,27 +1655,9 @@
 }
 
 
-static void store_cdrom_address(u8 *dest, int msf, u32 addr)
-{
-	if (msf) {
-		/* Convert to Minutes-Seconds-Frames */
-		addr >>= 2;		/* Convert to 2048-byte frames */
-		addr += 2*75;		/* Lead-in occupies 2 seconds */
-		dest[3] = addr % 75;	/* Frames */
-		addr /= 75;
-		dest[2] = addr % 60;	/* Seconds */
-		addr /= 60;
-		dest[1] = addr;		/* Minutes */
-		dest[0] = 0;		/* Reserved */
-	} else {
-		/* Absolute sector */
-		put_unaligned_be32(addr, dest);
-	}
-}
-
 static int do_read_header(struct fsg_dev *fsg, struct fsg_buffhd *bh)
 {
-	struct lun	*curlun = fsg->curlun;
+	struct fsg_lun	*curlun = fsg->curlun;
 	int		msf = fsg->cmnd[1] & 0x02;
 	u32		lba = get_unaligned_be32(&fsg->cmnd[2]);
 	u8		*buf = (u8 *) bh->buf;
@@ -2156,7 +1680,7 @@
 
 static int do_read_toc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
 {
-	struct lun	*curlun = fsg->curlun;
+	struct fsg_lun	*curlun = fsg->curlun;
 	int		msf = fsg->cmnd[1] & 0x02;
 	int		start_track = fsg->cmnd[6];
 	u8		*buf = (u8 *) bh->buf;
@@ -2184,7 +1708,7 @@
 
 static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
 {
-	struct lun	*curlun = fsg->curlun;
+	struct fsg_lun	*curlun = fsg->curlun;
 	int		mscmnd = fsg->cmnd[0];
 	u8		*buf = (u8 *) bh->buf;
 	u8		*buf0 = buf;
@@ -2265,7 +1789,7 @@
 
 static int do_start_stop(struct fsg_dev *fsg)
 {
-	struct lun	*curlun = fsg->curlun;
+	struct fsg_lun	*curlun = fsg->curlun;
 	int		loej, start;
 
 	if (!mod_data.removable) {
@@ -2295,7 +1819,7 @@
 		if (loej) {		// Simulate an unload/eject
 			up_read(&fsg->filesem);
 			down_write(&fsg->filesem);
-			close_backing_file(curlun);
+			fsg_lun_close(curlun);
 			up_write(&fsg->filesem);
 			down_read(&fsg->filesem);
 		}
@@ -2303,7 +1827,7 @@
 
 		/* Our emulation doesn't support mounting; the medium is
 		 * available for use as soon as it is loaded. */
-		if (!backing_file_is_open(curlun)) {
+		if (!fsg_lun_is_open(curlun)) {
 			curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
 			return -EINVAL;
 		}
@@ -2315,7 +1839,7 @@
 
 static int do_prevent_allow(struct fsg_dev *fsg)
 {
-	struct lun	*curlun = fsg->curlun;
+	struct fsg_lun	*curlun = fsg->curlun;
 	int		prevent;
 
 	if (!mod_data.removable) {
@@ -2330,7 +1854,7 @@
 	}
 
 	if (curlun->prevent_medium_removal && !prevent)
-		fsync_sub(curlun);
+		fsg_lun_fsync_sub(curlun);
 	curlun->prevent_medium_removal = prevent;
 	return 0;
 }
@@ -2339,7 +1863,7 @@
 static int do_read_format_capacities(struct fsg_dev *fsg,
 			struct fsg_buffhd *bh)
 {
-	struct lun	*curlun = fsg->curlun;
+	struct fsg_lun	*curlun = fsg->curlun;
 	u8		*buf = (u8 *) bh->buf;
 
 	buf[0] = buf[1] = buf[2] = 0;
@@ -2356,7 +1880,7 @@
 
 static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh)
 {
-	struct lun	*curlun = fsg->curlun;
+	struct fsg_lun	*curlun = fsg->curlun;
 
 	/* We don't support MODE SELECT */
 	curlun->sense_data = SS_INVALID_COMMAND;
@@ -2599,7 +2123,7 @@
 
 static int send_status(struct fsg_dev *fsg)
 {
-	struct lun		*curlun = fsg->curlun;
+	struct fsg_lun		*curlun = fsg->curlun;
 	struct fsg_buffhd	*bh;
 	int			rc;
 	u8			status = USB_STATUS_PASS;
@@ -2691,7 +2215,7 @@
 	int			lun = fsg->cmnd[1] >> 5;
 	static const char	dirletter[4] = {'u', 'o', 'i', 'n'};
 	char			hdlen[20];
-	struct lun		*curlun;
+	struct fsg_lun		*curlun;
 
 	/* Adjust the expected cmnd_size for protocol encapsulation padding.
 	 * Transparent SCSI doesn't pad. */
@@ -2820,7 +2344,7 @@
 
 	/* If the medium isn't mounted and the command needs to access
 	 * it, return an error. */
-	if (curlun && !backing_file_is_open(curlun) && needs_medium) {
+	if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
 		curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
 		return -EINVAL;
 	}
@@ -3075,8 +2599,8 @@
 
 static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
 {
-	struct usb_request	*req = bh->outreq;
-	struct bulk_cb_wrap	*cbw = req->buf;
+	struct usb_request		*req = bh->outreq;
+	struct fsg_bulk_cb_wrap	*cbw = req->buf;
 
 	/* Was this a real packet?  Should it be ignored? */
 	if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
@@ -3105,7 +2629,7 @@
 	}
 
 	/* Is the CBW meaningful? */
-	if (cbw->Lun >= MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
+	if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
 			cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
 		DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
 				"cmdlen %u\n",
@@ -3238,7 +2762,7 @@
 
 reset:
 	/* Deallocate the requests */
-	for (i = 0; i < NUM_BUFFERS; ++i) {
+	for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
 		struct fsg_buffhd *bh = &fsg->buffhds[i];
 
 		if (bh->inreq) {
@@ -3276,12 +2800,14 @@
 	DBG(fsg, "set interface %d\n", altsetting);
 
 	/* Enable the endpoints */
-	d = ep_desc(fsg->gadget, &fs_bulk_in_desc, &hs_bulk_in_desc);
+	d = fsg_ep_desc(fsg->gadget,
+			&fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
 	if ((rc = enable_endpoint(fsg, fsg->bulk_in, d)) != 0)
 		goto reset;
 	fsg->bulk_in_enabled = 1;
 
-	d = ep_desc(fsg->gadget, &fs_bulk_out_desc, &hs_bulk_out_desc);
+	d = fsg_ep_desc(fsg->gadget,
+			&fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
 	if ((rc = enable_endpoint(fsg, fsg->bulk_out, d)) != 0)
 		goto reset;
 	fsg->bulk_out_enabled = 1;
@@ -3289,14 +2815,15 @@
 	clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
 
 	if (transport_is_cbi()) {
-		d = ep_desc(fsg->gadget, &fs_intr_in_desc, &hs_intr_in_desc);
+		d = fsg_ep_desc(fsg->gadget,
+				&fsg_fs_intr_in_desc, &fsg_hs_intr_in_desc);
 		if ((rc = enable_endpoint(fsg, fsg->intr_in, d)) != 0)
 			goto reset;
 		fsg->intr_in_enabled = 1;
 	}
 
 	/* Allocate the requests */
-	for (i = 0; i < NUM_BUFFERS; ++i) {
+	for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
 		struct fsg_buffhd	*bh = &fsg->buffhds[i];
 
 		if ((rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq)) != 0)
@@ -3372,7 +2899,7 @@
 	struct fsg_buffhd	*bh;
 	enum fsg_state		old_state;
 	u8			new_config;
-	struct lun		*curlun;
+	struct fsg_lun		*curlun;
 	unsigned int		exception_req_tag;
 	int			rc;
 
@@ -3392,7 +2919,7 @@
 	/* Cancel all the pending transfers */
 	if (fsg->intreq_busy)
 		usb_ep_dequeue(fsg->intr_in, fsg->intreq);
-	for (i = 0; i < NUM_BUFFERS; ++i) {
+	for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
 		bh = &fsg->buffhds[i];
 		if (bh->inreq_busy)
 			usb_ep_dequeue(fsg->bulk_in, bh->inreq);
@@ -3403,7 +2930,7 @@
 	/* Wait until everything is idle */
 	for (;;) {
 		num_active = fsg->intreq_busy;
-		for (i = 0; i < NUM_BUFFERS; ++i) {
+		for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
 			bh = &fsg->buffhds[i];
 			num_active += bh->inreq_busy + bh->outreq_busy;
 		}
@@ -3425,7 +2952,7 @@
 	 * state, and the exception.  Then invoke the handler. */
 	spin_lock_irq(&fsg->lock);
 
-	for (i = 0; i < NUM_BUFFERS; ++i) {
+	for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
 		bh = &fsg->buffhds[i];
 		bh->state = BUF_STATE_EMPTY;
 	}
@@ -3506,7 +3033,8 @@
 		break;
 
 	case FSG_STATE_DISCONNECT:
-		fsync_all(fsg);
+		for (i = 0; i < fsg->nluns; ++i)
+			fsg_lun_fsync_sub(fsg->luns + i);
 		do_set_config(fsg, 0);		// Unconfigured state
 		break;
 
@@ -3595,201 +3123,10 @@
 
 /*-------------------------------------------------------------------------*/
 
-/* If the next two routines are called while the gadget is registered,
- * the caller must own fsg->filesem for writing. */
-
-static int open_backing_file(struct lun *curlun, const char *filename)
-{
-	int				ro;
-	struct file			*filp = NULL;
-	int				rc = -EINVAL;
-	struct inode			*inode = NULL;
-	loff_t				size;
-	loff_t				num_sectors;
-	loff_t				min_sectors;
-
-	/* R/W if we can, R/O if we must */
-	ro = curlun->ro;
-	if (!ro) {
-		filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0);
-		if (-EROFS == PTR_ERR(filp))
-			ro = 1;
-	}
-	if (ro)
-		filp = filp_open(filename, O_RDONLY | O_LARGEFILE, 0);
-	if (IS_ERR(filp)) {
-		LINFO(curlun, "unable to open backing file: %s\n", filename);
-		return PTR_ERR(filp);
-	}
-
-	if (!(filp->f_mode & FMODE_WRITE))
-		ro = 1;
-
-	if (filp->f_path.dentry)
-		inode = filp->f_path.dentry->d_inode;
-	if (inode && S_ISBLK(inode->i_mode)) {
-		if (bdev_read_only(inode->i_bdev))
-			ro = 1;
-	} else if (!inode || !S_ISREG(inode->i_mode)) {
-		LINFO(curlun, "invalid file type: %s\n", filename);
-		goto out;
-	}
-
-	/* If we can't read the file, it's no good.
-	 * If we can't write the file, use it read-only. */
-	if (!filp->f_op || !(filp->f_op->read || filp->f_op->aio_read)) {
-		LINFO(curlun, "file not readable: %s\n", filename);
-		goto out;
-	}
-	if (!(filp->f_op->write || filp->f_op->aio_write))
-		ro = 1;
-
-	size = i_size_read(inode->i_mapping->host);
-	if (size < 0) {
-		LINFO(curlun, "unable to find file size: %s\n", filename);
-		rc = (int) size;
-		goto out;
-	}
-	num_sectors = size >> 9;	// File size in 512-byte blocks
-	min_sectors = 1;
-	if (mod_data.cdrom) {
-		num_sectors &= ~3;	// Reduce to a multiple of 2048
-		min_sectors = 300*4;	// Smallest track is 300 frames
-		if (num_sectors >= 256*60*75*4) {
-			num_sectors = (256*60*75 - 1) * 4;
-			LINFO(curlun, "file too big: %s\n", filename);
-			LINFO(curlun, "using only first %d blocks\n",
-					(int) num_sectors);
-		}
-	}
-	if (num_sectors < min_sectors) {
-		LINFO(curlun, "file too small: %s\n", filename);
-		rc = -ETOOSMALL;
-		goto out;
-	}
-
-	get_file(filp);
-	curlun->ro = ro;
-	curlun->filp = filp;
-	curlun->file_length = size;
-	curlun->num_sectors = num_sectors;
-	LDBG(curlun, "open backing file: %s\n", filename);
-	rc = 0;
-
-out:
-	filp_close(filp, current->files);
-	return rc;
-}
-
-
-static void close_backing_file(struct lun *curlun)
-{
-	if (curlun->filp) {
-		LDBG(curlun, "close backing file\n");
-		fput(curlun->filp);
-		curlun->filp = NULL;
-	}
-}
-
-
-static ssize_t show_ro(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	struct lun	*curlun = dev_to_lun(dev);
-
-	return sprintf(buf, "%d\n", curlun->ro);
-}
-
-static ssize_t show_file(struct device *dev, struct device_attribute *attr,
-		char *buf)
-{
-	struct lun	*curlun = dev_to_lun(dev);
-	struct fsg_dev	*fsg = dev_get_drvdata(dev);
-	char		*p;
-	ssize_t		rc;
-
-	down_read(&fsg->filesem);
-	if (backing_file_is_open(curlun)) {	// Get the complete pathname
-		p = d_path(&curlun->filp->f_path, buf, PAGE_SIZE - 1);
-		if (IS_ERR(p))
-			rc = PTR_ERR(p);
-		else {
-			rc = strlen(p);
-			memmove(buf, p, rc);
-			buf[rc] = '\n';		// Add a newline
-			buf[++rc] = 0;
-		}
-	} else {				// No file, return 0 bytes
-		*buf = 0;
-		rc = 0;
-	}
-	up_read(&fsg->filesem);
-	return rc;
-}
-
-
-static ssize_t store_ro(struct device *dev, struct device_attribute *attr,
-		const char *buf, size_t count)
-{
-	ssize_t		rc = count;
-	struct lun	*curlun = dev_to_lun(dev);
-	struct fsg_dev	*fsg = dev_get_drvdata(dev);
-	int		i;
-
-	if (sscanf(buf, "%d", &i) != 1)
-		return -EINVAL;
-
-	/* Allow the write-enable status to change only while the backing file
-	 * is closed. */
-	down_read(&fsg->filesem);
-	if (backing_file_is_open(curlun)) {
-		LDBG(curlun, "read-only status change prevented\n");
-		rc = -EBUSY;
-	} else {
-		curlun->ro = !!i;
-		LDBG(curlun, "read-only status set to %d\n", curlun->ro);
-	}
-	up_read(&fsg->filesem);
-	return rc;
-}
-
-static ssize_t store_file(struct device *dev, struct device_attribute *attr,
-		const char *buf, size_t count)
-{
-	struct lun	*curlun = dev_to_lun(dev);
-	struct fsg_dev	*fsg = dev_get_drvdata(dev);
-	int		rc = 0;
-
-	if (curlun->prevent_medium_removal && backing_file_is_open(curlun)) {
-		LDBG(curlun, "eject attempt prevented\n");
-		return -EBUSY;				// "Door is locked"
-	}
-
-	/* Remove a trailing newline */
-	if (count > 0 && buf[count-1] == '\n')
-		((char *) buf)[count-1] = 0;		// Ugh!
-
-	/* Eject current medium */
-	down_write(&fsg->filesem);
-	if (backing_file_is_open(curlun)) {
-		close_backing_file(curlun);
-		curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
-	}
-
-	/* Load new medium */
-	if (count > 0 && buf[0]) {
-		rc = open_backing_file(curlun, buf);
-		if (rc == 0)
-			curlun->unit_attention_data =
-					SS_NOT_READY_TO_READY_TRANSITION;
-	}
-	up_write(&fsg->filesem);
-	return (rc < 0 ? rc : count);
-}
-
 
 /* The write permissions and store_xxx pointers are set in fsg_bind() */
-static DEVICE_ATTR(ro, 0444, show_ro, NULL);
-static DEVICE_ATTR(file, 0444, show_file, NULL);
+static DEVICE_ATTR(ro, 0444, fsg_show_ro, NULL);
+static DEVICE_ATTR(file, 0444, fsg_show_file, NULL);
 
 
 /*-------------------------------------------------------------------------*/
@@ -3804,7 +3141,9 @@
 
 static void lun_release(struct device *dev)
 {
-	struct fsg_dev	*fsg = dev_get_drvdata(dev);
+	struct rw_semaphore	*filesem = dev_get_drvdata(dev);
+	struct fsg_dev		*fsg =
+		container_of(filesem, struct fsg_dev, filesem);
 
 	kref_put(&fsg->ref, fsg_release);
 }
@@ -3813,7 +3152,7 @@
 {
 	struct fsg_dev		*fsg = get_gadget_data(gadget);
 	int			i;
-	struct lun		*curlun;
+	struct fsg_lun		*curlun;
 	struct usb_request	*req = fsg->ep0req;
 
 	DBG(fsg, "unbind\n");
@@ -3825,7 +3164,7 @@
 		if (curlun->registered) {
 			device_remove_file(&curlun->dev, &dev_attr_ro);
 			device_remove_file(&curlun->dev, &dev_attr_file);
-			close_backing_file(curlun);
+			fsg_lun_close(curlun);
 			device_unregister(&curlun->dev);
 			curlun->registered = 0;
 		}
@@ -3841,7 +3180,7 @@
 	}
 
 	/* Free the data buffers */
-	for (i = 0; i < NUM_BUFFERS; ++i)
+	for (i = 0; i < FSG_NUM_BUFFERS; ++i)
 		kfree(fsg->buffhds[i].buf);
 
 	/* Free the request and buffer for endpoint 0 */
@@ -3948,7 +3287,7 @@
 	struct fsg_dev		*fsg = the_fsg;
 	int			rc;
 	int			i;
-	struct lun		*curlun;
+	struct fsg_lun		*curlun;
 	struct usb_ep		*ep;
 	struct usb_request	*req;
 	char			*pathbuf, *p;
@@ -3963,10 +3302,10 @@
 
 	if (mod_data.removable) {	// Enable the store_xxx attributes
 		dev_attr_file.attr.mode = 0644;
-		dev_attr_file.store = store_file;
+		dev_attr_file.store = fsg_store_file;
 		if (!mod_data.cdrom) {
 			dev_attr_ro.attr.mode = 0644;
-			dev_attr_ro.store = store_ro;
+			dev_attr_ro.store = fsg_store_ro;
 		}
 	}
 
@@ -3974,7 +3313,7 @@
 	i = mod_data.nluns;
 	if (i == 0)
 		i = max(mod_data.num_filenames, 1u);
-	if (i > MAX_LUNS) {
+	if (i > FSG_MAX_LUNS) {
 		ERROR(fsg, "invalid number of LUNs: %d\n", i);
 		rc = -EINVAL;
 		goto out;
@@ -3982,7 +3321,7 @@
 
 	/* Create the LUNs, open their backing files, and register the
 	 * LUN devices in sysfs. */
-	fsg->luns = kzalloc(i * sizeof(struct lun), GFP_KERNEL);
+	fsg->luns = kzalloc(i * sizeof(struct fsg_lun), GFP_KERNEL);
 	if (!fsg->luns) {
 		rc = -ENOMEM;
 		goto out;
@@ -3991,13 +3330,14 @@
 
 	for (i = 0; i < fsg->nluns; ++i) {
 		curlun = &fsg->luns[i];
-		curlun->ro = mod_data.ro[i];
-		if (mod_data.cdrom)
-			curlun->ro = 1;
+		curlun->cdrom = !!mod_data.cdrom;
+		curlun->ro = mod_data.cdrom || mod_data.ro[i];
+		curlun->initially_ro = curlun->ro;
+		curlun->removable = mod_data.removable;
 		curlun->dev.release = lun_release;
 		curlun->dev.parent = &gadget->dev;
 		curlun->dev.driver = &fsg_driver.driver;
-		dev_set_drvdata(&curlun->dev, fsg);
+		dev_set_drvdata(&curlun->dev, &fsg->filesem);
 		dev_set_name(&curlun->dev,"%s-lun%d",
 			     dev_name(&gadget->dev), i);
 
@@ -4016,7 +3356,7 @@
 		kref_get(&fsg->ref);
 
 		if (mod_data.file[i] && *mod_data.file[i]) {
-			if ((rc = open_backing_file(curlun,
+			if ((rc = fsg_lun_open(curlun,
 					mod_data.file[i])) != 0)
 				goto out;
 		} else if (!mod_data.removable) {
@@ -4028,20 +3368,20 @@
 
 	/* Find all the endpoints we will use */
 	usb_ep_autoconfig_reset(gadget);
-	ep = usb_ep_autoconfig(gadget, &fs_bulk_in_desc);
+	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
 	if (!ep)
 		goto autoconf_fail;
 	ep->driver_data = fsg;		// claim the endpoint
 	fsg->bulk_in = ep;
 
-	ep = usb_ep_autoconfig(gadget, &fs_bulk_out_desc);
+	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
 	if (!ep)
 		goto autoconf_fail;
 	ep->driver_data = fsg;		// claim the endpoint
 	fsg->bulk_out = ep;
 
 	if (transport_is_cbi()) {
-		ep = usb_ep_autoconfig(gadget, &fs_intr_in_desc);
+		ep = usb_ep_autoconfig(gadget, &fsg_fs_intr_in_desc);
 		if (!ep)
 			goto autoconf_fail;
 		ep->driver_data = fsg;		// claim the endpoint
@@ -4055,28 +3395,28 @@
 	device_desc.bcdDevice = cpu_to_le16(mod_data.release);
 
 	i = (transport_is_cbi() ? 3 : 2);	// Number of endpoints
-	intf_desc.bNumEndpoints = i;
-	intf_desc.bInterfaceSubClass = mod_data.protocol_type;
-	intf_desc.bInterfaceProtocol = mod_data.transport_type;
-	fs_function[i + FS_FUNCTION_PRE_EP_ENTRIES] = NULL;
+	fsg_intf_desc.bNumEndpoints = i;
+	fsg_intf_desc.bInterfaceSubClass = mod_data.protocol_type;
+	fsg_intf_desc.bInterfaceProtocol = mod_data.transport_type;
+	fsg_fs_function[i + FSG_FS_FUNCTION_PRE_EP_ENTRIES] = NULL;
 
 	if (gadget_is_dualspeed(gadget)) {
-		hs_function[i + HS_FUNCTION_PRE_EP_ENTRIES] = NULL;
+		fsg_hs_function[i + FSG_HS_FUNCTION_PRE_EP_ENTRIES] = NULL;
 
 		/* Assume ep0 uses the same maxpacket value for both speeds */
 		dev_qualifier.bMaxPacketSize0 = fsg->ep0->maxpacket;
 
 		/* Assume endpoint addresses are the same for both speeds */
-		hs_bulk_in_desc.bEndpointAddress =
-				fs_bulk_in_desc.bEndpointAddress;
-		hs_bulk_out_desc.bEndpointAddress =
-				fs_bulk_out_desc.bEndpointAddress;
-		hs_intr_in_desc.bEndpointAddress =
-				fs_intr_in_desc.bEndpointAddress;
+		fsg_hs_bulk_in_desc.bEndpointAddress =
+			fsg_fs_bulk_in_desc.bEndpointAddress;
+		fsg_hs_bulk_out_desc.bEndpointAddress =
+			fsg_fs_bulk_out_desc.bEndpointAddress;
+		fsg_hs_intr_in_desc.bEndpointAddress =
+			fsg_fs_intr_in_desc.bEndpointAddress;
 	}
 
 	if (gadget_is_otg(gadget))
-		otg_desc.bmAttributes |= USB_OTG_HNP;
+		fsg_otg_desc.bmAttributes |= USB_OTG_HNP;
 
 	rc = -ENOMEM;
 
@@ -4090,7 +3430,7 @@
 	req->complete = ep0_complete;
 
 	/* Allocate the data buffers */
-	for (i = 0; i < NUM_BUFFERS; ++i) {
+	for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
 		struct fsg_buffhd	*bh = &fsg->buffhds[i];
 
 		/* Allocate for the bulk-in endpoint.  We assume that
@@ -4101,23 +3441,24 @@
 			goto out;
 		bh->next = bh + 1;
 	}
-	fsg->buffhds[NUM_BUFFERS - 1].next = &fsg->buffhds[0];
+	fsg->buffhds[FSG_NUM_BUFFERS - 1].next = &fsg->buffhds[0];
 
 	/* This should reflect the actual gadget power source */
 	usb_gadget_set_selfpowered(gadget);
 
-	snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
+	snprintf(fsg_string_manufacturer, sizeof fsg_string_manufacturer,
+			"%s %s with %s",
 			init_utsname()->sysname, init_utsname()->release,
 			gadget->name);
 
 	/* On a real device, serial[] would be loaded from permanent
 	 * storage.  We just encode it from the driver version string. */
-	for (i = 0; i < sizeof(serial) - 2; i += 2) {
+	for (i = 0; i < sizeof fsg_string_serial - 2; i += 2) {
 		unsigned char		c = DRIVER_VERSION[i / 2];
 
 		if (!c)
 			break;
-		sprintf(&serial[i], "%02X", c);
+		sprintf(&fsg_string_serial[i], "%02X", c);
 	}
 
 	fsg->thread_task = kthread_create(fsg_main_thread, fsg,
@@ -4133,7 +3474,7 @@
 	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
 	for (i = 0; i < fsg->nluns; ++i) {
 		curlun = &fsg->luns[i];
-		if (backing_file_is_open(curlun)) {
+		if (fsg_lun_is_open(curlun)) {
 			p = NULL;
 			if (pathbuf) {
 				p = d_path(&curlun->filp->f_path,
@@ -4203,7 +3544,7 @@
 #else
 	.speed		= USB_SPEED_FULL,
 #endif
-	.function	= (char *) longname,
+	.function	= (char *) fsg_string_product,
 	.bind		= fsg_bind,
 	.unbind		= fsg_unbind,
 	.disconnect	= fsg_disconnect,
@@ -4212,7 +3553,7 @@
 	.resume		= fsg_resume,
 
 	.driver		= {
-		.name		= (char *) shortname,
+		.name		= DRIVER_NAME,
 		.owner		= THIS_MODULE,
 		// .release = ...
 		// .suspend = ...
diff --git a/drivers/usb/gadget/mass_storage.c b/drivers/usb/gadget/mass_storage.c
new file mode 100644
index 0000000..19619fb
--- /dev/null
+++ b/drivers/usb/gadget/mass_storage.c
@@ -0,0 +1,240 @@
+/*
+ * mass_storage.c -- Mass Storage USB Gadget
+ *
+ * Copyright (C) 2003-2008 Alan Stern
+ * Copyright (C) 2009 Samsung Electronics
+ *                    Author: Michal Nazarewicz <m.nazarewicz@samsung.com>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+
+/*
+ * The Mass Storage Gadget acts as a USB Mass Storage device,
+ * appearing to the host as a disk drive or as a CD-ROM drive.  In
+ * addition to providing an example of a genuinely useful gadget
+ * driver for a USB device, it also illustrates a technique of
+ * double-buffering for increased throughput.  Last but not least, it
+ * gives an easy way to probe the behavior of the Mass Storage drivers
+ * in a USB host.
+ *
+ * This file serves only administrative purposes; all the business
+ * logic is implemented in the f_mass_storage.* files.  Read the
+ * comments in those files for a more detailed description.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/usb/ch9.h>
+
+
+/*-------------------------------------------------------------------------*/
+
+#define DRIVER_DESC		"Mass Storage Gadget"
+#define DRIVER_VERSION		"2009/09/11"
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module.  So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+#include "f_mass_storage.c"
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_device_descriptor msg_device_desc = {
+	.bLength =		sizeof msg_device_desc,
+	.bDescriptorType =	USB_DT_DEVICE,
+
+	.bcdUSB =		cpu_to_le16(0x0200),
+	.bDeviceClass =		USB_CLASS_PER_INTERFACE,
+
+	/* Vendor and product id can be overridden by module parameters.  */
+	.idVendor =		cpu_to_le16(FSG_VENDOR_ID),
+	.idProduct =		cpu_to_le16(FSG_PRODUCT_ID),
+	/* .bcdDevice = f(hardware) */
+	/* .iManufacturer = DYNAMIC */
+	/* .iProduct = DYNAMIC */
+	/* NO SERIAL NUMBER */
+	.bNumConfigurations =	1,
+};
+
+static struct usb_otg_descriptor otg_descriptor = {
+	.bLength =		sizeof otg_descriptor,
+	.bDescriptorType =	USB_DT_OTG,
+
+	/* REVISIT SRP-only hardware is possible, although
+	 * it would not be called "OTG" ...
+	 */
+	.bmAttributes =		USB_OTG_SRP | USB_OTG_HNP,
+};
+
+static const struct usb_descriptor_header *otg_desc[] = {
+	(struct usb_descriptor_header *) &otg_descriptor,
+	NULL,
+};
+
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX		0
+#define STRING_PRODUCT_IDX		1
+#define STRING_CONFIGURATION_IDX	2
+
+static char manufacturer[50];
+
+static struct usb_string strings_dev[] = {
+	[STRING_MANUFACTURER_IDX].s = manufacturer,
+	[STRING_PRODUCT_IDX].s = DRIVER_DESC,
+	[STRING_CONFIGURATION_IDX].s = "Self Powered",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+	.language	= 0x0409,	/* en-us */
+	.strings	= strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+	&stringtab_dev,
+	NULL,
+};
+
+
+
+/****************************** Configurations ******************************/
+
+static struct fsg_module_parameters mod_data = {
+	.stall = 1
+};
+FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
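
With the empty prefix the parameters land directly on g_mass_storage, so a
typical load might look like this (the backing-file path is, of course,
hypothetical):

# modprobe g_mass_storage file=/srv/backing.img ro=1 stall=0
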
+
+static unsigned long msg_registered;
+static void msg_cleanup(void);
+
+static int __init msg_do_config(struct usb_configuration *c)
+{
+	struct fsg_common *common;
+	struct fsg_config config;
+	int ret;
+
+	if (gadget_is_otg(c->cdev->gadget)) {
+		c->descriptors = otg_desc;
+		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+	}
+
+	fsg_config_from_params(&config, &mod_data);
+	config.thread_exits = (void(*)(struct fsg_common*))&msg_cleanup;
+	common = fsg_common_init(0, c->cdev, &config);
+	if (IS_ERR(common))
+		return PTR_ERR(common);
+
+	ret = fsg_add(c->cdev, c, common);
+	fsg_common_put(common);
+	return ret;
+}
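
Note the function-pointer cast on thread_exits above: it relies on
msg_cleanup() simply ignoring the struct fsg_common argument it is
nominally passed.  A stricter, equivalent sketch would use a small wrapper
with the exact callback signature (the wrapper name is hypothetical):

static void msg_thread_exits(struct fsg_common *common)
{
	msg_cleanup();		/* ignore the argument, as before */
}

/* registered as: config.thread_exits = msg_thread_exits; */
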
+
+static struct usb_configuration msg_config_driver = {
+	.label			= "Linux File-Backed Storage",
+	.bind			= msg_do_config,
+	.bConfigurationValue	= 1,
+	/* .iConfiguration = DYNAMIC */
+	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
+};
+
+
+
+/****************************** Gadget Bind ******************************/
+
+
+static int __init msg_bind(struct usb_composite_dev *cdev)
+{
+	struct usb_gadget *gadget = cdev->gadget;
+	int status;
+
+	/* Allocate string descriptor numbers ... note that string
+	 * contents can be overridden by the composite_dev glue.
+	 */
+
+	/* device descriptor strings: manufacturer, product */
+	snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
+	         init_utsname()->sysname, init_utsname()->release,
+	         gadget->name);
+	status = usb_string_id(cdev);
+	if (status < 0)
+		return status;
+	strings_dev[STRING_MANUFACTURER_IDX].id = status;
+	msg_device_desc.iManufacturer = status;
+
+	status = usb_string_id(cdev);
+	if (status < 0)
+		return status;
+	strings_dev[STRING_PRODUCT_IDX].id = status;
+	msg_device_desc.iProduct = status;
+
+	status = usb_string_id(cdev);
+	if (status < 0)
+		return status;
+	strings_dev[STRING_CONFIGURATION_IDX].id = status;
+	msg_config_driver.iConfiguration = status;
+
+	/* register our configuration */
+	status = usb_add_config(cdev, &msg_config_driver);
+	if (status < 0)
+		return status;
+
+	dev_info(&gadget->dev, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
+	set_bit(0, &msg_registered);
+	return 0;
+}
+
+
+/****************************** Some noise ******************************/
+
+
+static struct usb_composite_driver msg_driver = {
+	.name		= "g_mass_storage",
+	.dev		= &msg_device_desc,
+	.strings	= dev_strings,
+	.bind		= msg_bind,
+};
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Michal Nazarewicz");
+MODULE_LICENSE("GPL");
+
+static int __init msg_init(void)
+{
+	return usb_composite_register(&msg_driver);
+}
+module_init(msg_init);
+
+static void msg_cleanup(void)
+{
+	if (test_and_clear_bit(0, &msg_registered))
+		usb_composite_unregister(&msg_driver);
+}
+module_exit(msg_cleanup);
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c
new file mode 100644
index 0000000..4295601
--- /dev/null
+++ b/drivers/usb/gadget/multi.c
@@ -0,0 +1,358 @@
+/*
+ * multi.c -- Multifunction Composite driver
+ *
+ * Copyright (C) 2008 David Brownell
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (C) 2009 Samsung Electronics
+ * Author: Michal Nazarewicz (m.nazarewicz@samsung.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+
+
+#if defined USB_ETH_RNDIS
+#  undef USB_ETH_RNDIS
+#endif
+#ifdef CONFIG_USB_ETH_RNDIS
+#  define USB_ETH_RNDIS y
+#endif
+
+
+#define DRIVER_DESC		"Multifunction Composite Gadget"
+#define DRIVER_VERSION		"2009/07/21"
+
+/*-------------------------------------------------------------------------*/
+
+#define MULTI_VENDOR_NUM	0x0525	/* XXX NetChip */
+#define MULTI_PRODUCT_NUM	0xa4ab	/* XXX */
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module.  So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#include "u_serial.c"
+#include "f_acm.c"
+
+#include "f_ecm.c"
+#include "f_subset.c"
+#ifdef USB_ETH_RNDIS
+#  include "f_rndis.c"
+#  include "rndis.c"
+#endif
+#include "u_ether.c"
+
+#undef DBG     /* u_ether.c has broken idea about macros */
+#undef VDBG    /* so clean up after it */
+#undef ERROR
+#undef INFO
+#include "f_mass_storage.c"
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_device_descriptor device_desc = {
+	.bLength =		sizeof device_desc,
+	.bDescriptorType =	USB_DT_DEVICE,
+
+	.bcdUSB =		cpu_to_le16(0x0200),
+
+	/* .bDeviceClass =		USB_CLASS_COMM, */
+	/* .bDeviceSubClass =	0, */
+	/* .bDeviceProtocol =	0, */
+	.bDeviceClass =		0xEF,
+	.bDeviceSubClass =	2,
+	.bDeviceProtocol =	1,
+	/* .bMaxPacketSize0 = f(hardware) */
+
+	/* Vendor and product id can be overridden by module parameters.  */
+	.idVendor =		cpu_to_le16(MULTI_VENDOR_NUM),
+	.idProduct =		cpu_to_le16(MULTI_PRODUCT_NUM),
+	/* .bcdDevice = f(hardware) */
+	/* .iManufacturer = DYNAMIC */
+	/* .iProduct = DYNAMIC */
+	/* NO SERIAL NUMBER */
+	.bNumConfigurations =	1,
+};
+
+static struct usb_otg_descriptor otg_descriptor = {
+	.bLength =		sizeof otg_descriptor,
+	.bDescriptorType =	USB_DT_OTG,
+
+	/* REVISIT SRP-only hardware is possible, although
+	 * it would not be called "OTG" ...
+	 */
+	.bmAttributes =		USB_OTG_SRP | USB_OTG_HNP,
+};
+
+static const struct usb_descriptor_header *otg_desc[] = {
+	(struct usb_descriptor_header *) &otg_descriptor,
+	NULL,
+};
+
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX		0
+#define STRING_PRODUCT_IDX		1
+
+static char manufacturer[50];
+
+static struct usb_string strings_dev[] = {
+	[STRING_MANUFACTURER_IDX].s = manufacturer,
+	[STRING_PRODUCT_IDX].s = DRIVER_DESC,
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+	.language	= 0x0409,	/* en-us */
+	.strings	= strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+	&stringtab_dev,
+	NULL,
+};
+
+static u8 hostaddr[ETH_ALEN];
+
+
+
+/****************************** Configurations ******************************/
+
+static struct fsg_module_parameters mod_data = {
+	.stall = 1
+};
+FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
+
+static struct fsg_common *fsg_common;
+
+
+#ifdef USB_ETH_RNDIS
+
+static int __init rndis_do_config(struct usb_configuration *c)
+{
+	int ret;
+
+	if (gadget_is_otg(c->cdev->gadget)) {
+		c->descriptors = otg_desc;
+		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+	}
+
+	ret = rndis_bind_config(c, hostaddr);
+	if (ret < 0)
+		return ret;
+
+	ret = acm_bind_config(c, 0);
+	if (ret < 0)
+		return ret;
+
+	ret = fsg_add(c->cdev, c, fsg_common);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static struct usb_configuration rndis_config_driver = {
+	.label			= "Multifunction Composite (RNDIS + MS + ACM)",
+	.bind			= rndis_do_config,
+	.bConfigurationValue	= 2,
+	/* .iConfiguration = DYNAMIC */
+	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
+};
+
+#endif
+
+#ifdef CONFIG_USB_G_MULTI_CDC
+
+static int __init cdc_do_config(struct usb_configuration *c)
+{
+	int ret;
+
+	if (gadget_is_otg(c->cdev->gadget)) {
+		c->descriptors = otg_desc;
+		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+	}
+
+	ret = ecm_bind_config(c, hostaddr);
+	if (ret < 0)
+		return ret;
+
+	ret = acm_bind_config(c, 0);
+	if (ret < 0)
+		return ret;
+
+	ret = fsg_add(c->cdev, c, fsg_common);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static struct usb_configuration cdc_config_driver = {
+	.label			= "Multifunction Composite (CDC + MS + ACM)",
+	.bind			= cdc_do_config,
+	.bConfigurationValue	= 1,
+	/* .iConfiguration = DYNAMIC */
+	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
+};
+
+#endif
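+
+/*
+ * A note on the two configurations above: multi_bind() below registers
+ * the RNDIS configuration (bConfigurationValue 2) before the CDC ECM
+ * one (bConfigurationValue 1), so hosts that simply take the first
+ * listed configuration -- notably Windows -- end up with RNDIS, while
+ * other hosts can still select the CDC variant with a standard
+ * SET_CONFIGURATION(1) request.
+ */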
+
+
+
+/****************************** Gadget Bind ******************************/
+
+
+static int __init multi_bind(struct usb_composite_dev *cdev)
+{
+	struct usb_gadget *gadget = cdev->gadget;
+	int status, gcnum;
+
+	if (!can_support_ecm(cdev->gadget)) {
+		dev_err(&gadget->dev, "controller '%s' not usable\n",
+		        gadget->name);
+		return -EINVAL;
+	}
+
+	/* set up network link layer */
+	status = gether_setup(cdev->gadget, hostaddr);
+	if (status < 0)
+		return status;
+
+	/* set up serial link layer */
+	status = gserial_setup(cdev->gadget, 1);
+	if (status < 0)
+		goto fail0;
+
+	/* set up mass storage function */
+	fsg_common = fsg_common_from_params(0, cdev, &mod_data);
+	if (IS_ERR(fsg_common)) {
+		status = PTR_ERR(fsg_common);
+		goto fail1;
+	}
+
+
+	gcnum = usb_gadget_controller_number(gadget);
+	if (gcnum >= 0)
+		device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
+	else {
+		/* We assume that can_support_ecm() tells the truth;
+		 * but if the controller isn't recognized at all then
+		 * that assumption is a bit more likely to be wrong.
+		 */
+		WARNING(cdev, "controller '%s' not recognized\n",
+		        gadget->name);
+		device_desc.bcdDevice = cpu_to_le16(0x0300 | 0x0099);
+	}
+
+
+	/* Allocate string descriptor numbers ... note that string
+	 * contents can be overridden by the composite_dev glue.
+	 */
+
+	/* device descriptor strings: manufacturer, product */
+	snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
+	         init_utsname()->sysname, init_utsname()->release,
+	         gadget->name);
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto fail2;
+	strings_dev[STRING_MANUFACTURER_IDX].id = status;
+	device_desc.iManufacturer = status;
+
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto fail2;
+	strings_dev[STRING_PRODUCT_IDX].id = status;
+	device_desc.iProduct = status;
+
+#ifdef USB_ETH_RNDIS
+	/* register our first configuration */
+	status = usb_add_config(cdev, &rndis_config_driver);
+	if (status < 0)
+		goto fail2;
+#endif
+
+#ifdef CONFIG_USB_G_MULTI_CDC
+	/* register our second configuration */
+	status = usb_add_config(cdev, &cdc_config_driver);
+	if (status < 0)
+		goto fail2;
+#endif
+
+	dev_info(&gadget->dev, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
+	fsg_common_put(fsg_common);
+	return 0;
+
+fail2:
+	fsg_common_put(fsg_common);
+fail1:
+	gserial_cleanup();
+fail0:
+	gether_cleanup();
+	return status;
+}
+
+static int __exit multi_unbind(struct usb_composite_dev *cdev)
+{
+	gserial_cleanup();
+	gether_cleanup();
+	return 0;
+}
+
+
+/****************************** Some noise ******************************/
+
+
+static struct usb_composite_driver multi_driver = {
+	.name		= "g_multi",
+	.dev		= &device_desc,
+	.strings	= dev_strings,
+	.bind		= multi_bind,
+	.unbind		= __exit_p(multi_unbind),
+};
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Michal Nazarewicz");
+MODULE_LICENSE("GPL");
+
+static int __init g_multi_init(void)
+{
+	return usb_composite_register(&multi_driver);
+}
+module_init(g_multi_init);
+
+static void __exit g_multi_cleanup(void)
+{
+	usb_composite_unregister(&multi_driver);
+}
+module_exit(g_multi_cleanup);
diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c
new file mode 100644
index 0000000..868d8ee
--- /dev/null
+++ b/drivers/usb/gadget/storage_common.c
@@ -0,0 +1,778 @@
+/*
+ * storage_common.c -- Common definitions for mass storage functionality
+ *
+ * Copyright (C) 2003-2008 Alan Stern
+ * Copyright (C) 2009 Samsung Electronics
+ * Author: Michal Nazarewicz (m.nazarewicz@samsung.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+
+/*
+ * This file requires the following identifiers, used in USB strings,
+ * to be defined (each of type pointer to char):
+ *  - fsg_string_manufacturer -- name of the manufacturer
+ *  - fsg_string_product      -- name of the product
+ *  - fsg_string_serial       -- product's serial number
+ *  - fsg_string_config       -- name of the configuration
+ *  - fsg_string_interface    -- name of the interface
+ * The first four are only needed when the FSG_DESCRIPTORS_DEVICE_STRINGS
+ * macro is defined prior to including this file.
+ */
+
+/*
+ * When FSG_NO_INTR_EP is defined, the fsg_fs_intr_in_desc and
+ * fsg_hs_intr_in_desc objects as well as the
+ * FSG_FS_FUNCTION_PRE_EP_ENTRIES and FSG_HS_FUNCTION_PRE_EP_ENTRIES
+ * macros are not defined.
+ *
+ * When FSG_NO_DEVICE_STRINGS is defined, FSG_STRING_MANUFACTURER,
+ * FSG_STRING_PRODUCT, FSG_STRING_SERIAL and FSG_STRING_CONFIG are not
+ * defined (and the corresponding entries in the string tables are
+ * missing) and FSG_STRING_INTERFACE has the value zero.
+ *
+ * When FSG_NO_OTG is defined, fsg_otg_desc won't be defined.
+ */
+
+/*
+ * If FSG_BUFFHD_STATIC_BUFFER is defined when this file is included,
+ * the fsg_buffhd structure's buf field will be an array of FSG_BUFLEN
+ * characters rather than a pointer to void.
+ */
+
+
+#include <asm/unaligned.h>
+
+
+/* Thanks to NetChip Technologies for donating this product ID.
+ *
+ * DO NOT REUSE THESE IDs with any other driver!!  Ever!!
+ * Instead:  allocate your own, using normal USB-IF procedures. */
+#define FSG_VENDOR_ID	0x0525	/* NetChip */
+#define FSG_PRODUCT_ID	0xa4a5	/* Linux-USB File-backed Storage Gadget */
+
+
+/*-------------------------------------------------------------------------*/
+
+
+#ifndef DEBUG
+#undef VERBOSE_DEBUG
+#undef DUMP_MSGS
+#endif /* !DEBUG */
+
+#ifdef VERBOSE_DEBUG
+#define VLDBG	LDBG
+#else
+#define VLDBG(lun, fmt, args...) do { } while (0)
+#endif /* VERBOSE_DEBUG */
+
+#define LDBG(lun, fmt, args...)   dev_dbg (&(lun)->dev, fmt, ## args)
+#define LERROR(lun, fmt, args...) dev_err (&(lun)->dev, fmt, ## args)
+#define LWARN(lun, fmt, args...)  dev_warn(&(lun)->dev, fmt, ## args)
+#define LINFO(lun, fmt, args...)  dev_info(&(lun)->dev, fmt, ## args)
+
+/* Keep these macros in sync with those in
+ * include/linux/usb/composite.h or else GCC will complain.  If they
+ * are identical (same argument names, white space in the same
+ * places) GCC will allow the redefinition; otherwise (even if only
+ * some white space is removed or added) a warning will be issued.
+ * We deliberately don't check whether the symbols are already
+ * defined, because a warning is desired when the macros were defined
+ * by someone else to mean something else. */
+#define DBG(d, fmt, args...)     dev_dbg(&(d)->gadget->dev , fmt , ## args)
+#define VDBG(d, fmt, args...)    dev_vdbg(&(d)->gadget->dev , fmt , ## args)
+#define ERROR(d, fmt, args...)   dev_err(&(d)->gadget->dev , fmt , ## args)
+#define WARNING(d, fmt, args...) dev_warn(&(d)->gadget->dev , fmt , ## args)
+#define INFO(d, fmt, args...)    dev_info(&(d)->gadget->dev , fmt , ## args)
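+
+/*
+ * For example, after the definitions above a textually identical
+ *
+ *	#define INFO(d, fmt, args...)    dev_info(&(d)->gadget->dev , fmt , ## args)
+ *
+ * is accepted silently, whereas dropping the space before a comma
+ * changes the white-space separation and makes GCC emit a "macro
+ * redefined" warning -- exactly the behaviour relied upon above.
+ */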
+
+
+
+#ifdef DUMP_MSGS
+
+#  define dump_msg(fsg, /* const char * */ label,			\
+		   /* const u8 * */ buf, /* unsigned */ length) do {	\
+	if (length < 512) {						\
+		DBG(fsg, "%s, length %u:\n", label, length);		\
+		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,	\
+			       16, 1, buf, length, 0);			\
+	}								\
+} while (0)
+
+#  define dump_cdb(fsg) do { } while (0)
+
+#else
+
+#  define dump_msg(fsg, /* const char * */ label, \
+		   /* const u8 * */ buf, /* unsigned */ length) do { } while (0)
+
+#  ifdef VERBOSE_DEBUG
+
+#    define dump_cdb(fsg)						\
+	print_hex_dump(KERN_DEBUG, "SCSI CDB: ", DUMP_PREFIX_NONE,	\
+		       16, 1, (fsg)->cmnd, (fsg)->cmnd_size, 0)
+
+#  else
+
+#    define dump_cdb(fsg) do { } while (0)
+
+#  endif /* VERBOSE_DEBUG */
+
+#endif /* DUMP_MSGS */
+
+
+
+
+
+/*-------------------------------------------------------------------------*/
+
+/* SCSI device types */
+#define TYPE_DISK	0x00
+#define TYPE_CDROM	0x05
+
+/* USB protocol value = the transport method */
+#define USB_PR_CBI	0x00		/* Control/Bulk/Interrupt */
+#define USB_PR_CB	0x01		/* Control/Bulk w/o interrupt */
+#define USB_PR_BULK	0x50		/* Bulk-only */
+
+/* USB subclass value = the protocol encapsulation */
+#define USB_SC_RBC	0x01		/* Reduced Block Commands (flash) */
+#define USB_SC_8020	0x02		/* SFF-8020i, MMC-2, ATAPI (CD-ROM) */
+#define USB_SC_QIC	0x03		/* QIC-157 (tape) */
+#define USB_SC_UFI	0x04		/* UFI (floppy) */
+#define USB_SC_8070	0x05		/* SFF-8070i (removable) */
+#define USB_SC_SCSI	0x06		/* Transparent SCSI */
+
+/* Bulk-only data structures */
+
+/* Command Block Wrapper */
+struct fsg_bulk_cb_wrap {
+	__le32	Signature;		/* Contains 'USBC' */
+	u32	Tag;			/* Unique per command id */
+	__le32	DataTransferLength;	/* Size of the data */
+	u8	Flags;			/* Direction in bit 7 */
+	u8	Lun;			/* LUN (normally 0) */
+	u8	Length;			/* Of the CDB, <= MAX_COMMAND_SIZE */
+	u8	CDB[16];		/* Command Data Block */
+};
+
+#define USB_BULK_CB_WRAP_LEN	31
+#define USB_BULK_CB_SIG		0x43425355	/* Spells out USBC */
+#define USB_BULK_IN_FLAG	0x80
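+
+/*
+ * Sanity note: the wrapper above packs to 4 + 4 + 4 + 1 + 1 + 1 + 16
+ * = 31 bytes on the wire, matching USB_BULK_CB_WRAP_LEN, and
+ * 0x43425355 is simply "USBC" stored little-endian.
+ */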
+
+/* Command Status Wrapper */
+struct bulk_cs_wrap {
+	__le32	Signature;		/* Should = 'USBS' */
+	u32	Tag;			/* Same as original command */
+	__le32	Residue;		/* Amount not transferred */
+	u8	Status;			/* See below */
+};
+
+#define USB_BULK_CS_WRAP_LEN	13
+#define USB_BULK_CS_SIG		0x53425355	/* Spells out 'USBS' */
+#define USB_STATUS_PASS		0
+#define USB_STATUS_FAIL		1
+#define USB_STATUS_PHASE_ERROR	2
+
+/* Bulk-only class specific requests */
+#define USB_BULK_RESET_REQUEST		0xff
+#define USB_BULK_GET_MAX_LUN_REQUEST	0xfe
+
+
+/* CBI Interrupt data structure */
+struct interrupt_data {
+	u8	bType;
+	u8	bValue;
+};
+
+#define CBI_INTERRUPT_DATA_LEN		2
+
+/* CBI Accept Device-Specific Command request */
+#define USB_CBI_ADSC_REQUEST		0x00
+
+
+/* Length of a SCSI Command Data Block */
+#define MAX_COMMAND_SIZE	16
+
+/* SCSI commands that we recognize */
+#define SC_FORMAT_UNIT			0x04
+#define SC_INQUIRY			0x12
+#define SC_MODE_SELECT_6		0x15
+#define SC_MODE_SELECT_10		0x55
+#define SC_MODE_SENSE_6			0x1a
+#define SC_MODE_SENSE_10		0x5a
+#define SC_PREVENT_ALLOW_MEDIUM_REMOVAL	0x1e
+#define SC_READ_6			0x08
+#define SC_READ_10			0x28
+#define SC_READ_12			0xa8
+#define SC_READ_CAPACITY		0x25
+#define SC_READ_FORMAT_CAPACITIES	0x23
+#define SC_READ_HEADER			0x44
+#define SC_READ_TOC			0x43
+#define SC_RELEASE			0x17
+#define SC_REQUEST_SENSE		0x03
+#define SC_RESERVE			0x16
+#define SC_SEND_DIAGNOSTIC		0x1d
+#define SC_START_STOP_UNIT		0x1b
+#define SC_SYNCHRONIZE_CACHE		0x35
+#define SC_TEST_UNIT_READY		0x00
+#define SC_VERIFY			0x2f
+#define SC_WRITE_6			0x0a
+#define SC_WRITE_10			0x2a
+#define SC_WRITE_12			0xaa
+
+/* SCSI Sense Key/Additional Sense Code/ASC Qualifier values */
+#define SS_NO_SENSE				0
+#define SS_COMMUNICATION_FAILURE		0x040800
+#define SS_INVALID_COMMAND			0x052000
+#define SS_INVALID_FIELD_IN_CDB			0x052400
+#define SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE	0x052100
+#define SS_LOGICAL_UNIT_NOT_SUPPORTED		0x052500
+#define SS_MEDIUM_NOT_PRESENT			0x023a00
+#define SS_MEDIUM_REMOVAL_PREVENTED		0x055302
+#define SS_NOT_READY_TO_READY_TRANSITION	0x062800
+#define SS_RESET_OCCURRED			0x062900
+#define SS_SAVING_PARAMETERS_NOT_SUPPORTED	0x053900
+#define SS_UNRECOVERED_READ_ERROR		0x031100
+#define SS_WRITE_ERROR				0x030c02
+#define SS_WRITE_PROTECTED			0x072700
+
+#define SK(x)		((u8) ((x) >> 16))	/* Sense Key byte, etc. */
+#define ASC(x)		((u8) ((x) >> 8))
+#define ASCQ(x)		((u8) (x))
+
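+/*
+ * Worked example: SS_MEDIUM_NOT_PRESENT == 0x023a00 decomposes via
+ * the macros above into SK() == 0x02 (NOT READY), ASC() == 0x3a and
+ * ASCQ() == 0x00 -- the standard "medium not present" sense triple.
+ */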
+
+/*-------------------------------------------------------------------------*/
+
+
+struct fsg_lun {
+	struct file	*filp;
+	loff_t		file_length;
+	loff_t		num_sectors;
+
+	unsigned int	initially_ro:1;
+	unsigned int	ro:1;
+	unsigned int	removable:1;
+	unsigned int	cdrom:1;
+	unsigned int	prevent_medium_removal:1;
+	unsigned int	registered:1;
+	unsigned int	info_valid:1;
+
+	u32		sense_data;
+	u32		sense_data_info;
+	u32		unit_attention_data;
+
+	struct device	dev;
+};
+
+#define fsg_lun_is_open(curlun)	((curlun)->filp != NULL)
+
+static struct fsg_lun *fsg_lun_from_dev(struct device *dev)
+{
+	return container_of(dev, struct fsg_lun, dev);
+}
+
+
+/* Big enough to hold our biggest descriptor */
+#define EP0_BUFSIZE	256
+#define DELAYED_STATUS	(EP0_BUFSIZE + 999)	/* An impossibly large value */
+
+/* Number of buffers we will use.  2 is enough for double-buffering */
+#define FSG_NUM_BUFFERS	2
+
+/* Default length of the data buffers. */
+#define FSG_BUFLEN	((u32)16384)
+
+/* Maximal number of LUNs supported in mass storage function */
+#define FSG_MAX_LUNS	8
+
+enum fsg_buffer_state {
+	BUF_STATE_EMPTY = 0,
+	BUF_STATE_FULL,
+	BUF_STATE_BUSY
+};
+
+struct fsg_buffhd {
+#ifdef FSG_BUFFHD_STATIC_BUFFER
+	char				buf[FSG_BUFLEN];
+#else
+	void				*buf;
+#endif
+	enum fsg_buffer_state		state;
+	struct fsg_buffhd		*next;
+
+	/* The NetChip 2280 is faster, and handles some protocol faults
+	 * better, if we don't submit any short bulk-out read requests.
+	 * So we will record the intended request length here. */
+	unsigned int			bulk_out_intended_length;
+
+	struct usb_request		*inreq;
+	int				inreq_busy;
+	struct usb_request		*outreq;
+	int				outreq_busy;
+};
+
+enum fsg_state {
+	/* This one isn't used anywhere */
+	FSG_STATE_COMMAND_PHASE = -10,
+	FSG_STATE_DATA_PHASE,
+	FSG_STATE_STATUS_PHASE,
+
+	FSG_STATE_IDLE = 0,
+	FSG_STATE_ABORT_BULK_OUT,
+	FSG_STATE_RESET,
+	FSG_STATE_INTERFACE_CHANGE,
+	FSG_STATE_CONFIG_CHANGE,
+	FSG_STATE_DISCONNECT,
+	FSG_STATE_EXIT,
+	FSG_STATE_TERMINATED
+};
+
+enum data_direction {
+	DATA_DIR_UNKNOWN = 0,
+	DATA_DIR_FROM_HOST,
+	DATA_DIR_TO_HOST,
+	DATA_DIR_NONE
+};
+
+
+/*-------------------------------------------------------------------------*/
+
+
+static inline u32 get_unaligned_be24(u8 *buf)
+{
+	return 0xffffff & (u32) get_unaligned_be32(buf - 1);
+}
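+
+/*
+ * Note the trick above: the 24-bit big-endian field at buf[0..2] is
+ * fetched as the low three bytes of a 32-bit read starting one byte
+ * early, so for buf[] = { 0x12, 0x34, 0x56 } the result is 0x123456.
+ * This is only safe because callers point buf at least one byte into
+ * a larger buffer (a SCSI CDB).
+ */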
+
+
+/*-------------------------------------------------------------------------*/
+
+
+enum {
+#ifndef FSG_NO_DEVICE_STRINGS
+	FSG_STRING_MANUFACTURER	= 1,
+	FSG_STRING_PRODUCT,
+	FSG_STRING_SERIAL,
+	FSG_STRING_CONFIG,
+#endif
+	FSG_STRING_INTERFACE
+};
+
+
+#ifndef FSG_NO_OTG
+static struct usb_otg_descriptor
+fsg_otg_desc = {
+	.bLength =		sizeof fsg_otg_desc,
+	.bDescriptorType =	USB_DT_OTG,
+
+	.bmAttributes =		USB_OTG_SRP,
+};
+#endif
+
+/* There is only one interface. */
+
+static struct usb_interface_descriptor
+fsg_intf_desc = {
+	.bLength =		sizeof fsg_intf_desc,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	.bNumEndpoints =	2,		/* Adjusted during fsg_bind() */
+	.bInterfaceClass =	USB_CLASS_MASS_STORAGE,
+	.bInterfaceSubClass =	USB_SC_SCSI,	/* Adjusted during fsg_bind() */
+	.bInterfaceProtocol =	USB_PR_BULK,	/* Adjusted during fsg_bind() */
+	.iInterface =		FSG_STRING_INTERFACE,
+};
+
+/* Three full-speed endpoint descriptors: bulk-in, bulk-out,
+ * and interrupt-in. */
+
+static struct usb_endpoint_descriptor
+fsg_fs_bulk_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	/* wMaxPacketSize set by autoconfiguration */
+};
+
+static struct usb_endpoint_descriptor
+fsg_fs_bulk_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	/* wMaxPacketSize set by autoconfiguration */
+};
+
+#ifndef FSG_NO_INTR_EP
+
+static struct usb_endpoint_descriptor
+fsg_fs_intr_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(2),
+	.bInterval =		32,	/* frames -> 32 ms */
+};
+
+#ifndef FSG_NO_OTG
+#  define FSG_FS_FUNCTION_PRE_EP_ENTRIES	2
+#else
+#  define FSG_FS_FUNCTION_PRE_EP_ENTRIES	1
+#endif
+
+#endif
+
+static struct usb_descriptor_header *fsg_fs_function[] = {
+#ifndef FSG_NO_OTG
+	(struct usb_descriptor_header *) &fsg_otg_desc,
+#endif
+	(struct usb_descriptor_header *) &fsg_intf_desc,
+	(struct usb_descriptor_header *) &fsg_fs_bulk_in_desc,
+	(struct usb_descriptor_header *) &fsg_fs_bulk_out_desc,
+#ifndef FSG_NO_INTR_EP
+	(struct usb_descriptor_header *) &fsg_fs_intr_in_desc,
+#endif
+	NULL,
+};
+
+
+/*
+ * USB 2.0 devices need to expose both high speed and full speed
+ * descriptors, unless they only run at full speed.
+ *
+ * That means alternate endpoint descriptors (bigger packets)
+ * and a "device qualifier" ... plus more construction options
+ * for the config descriptor.
+ */
+static struct usb_endpoint_descriptor
+fsg_hs_bulk_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	/* bEndpointAddress copied from fs_bulk_in_desc during fsg_bind() */
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor
+fsg_hs_bulk_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	/* bEndpointAddress copied from fs_bulk_out_desc during fsg_bind() */
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+	.bInterval =		1,	/* NAK every 1 uframe */
+};
+
+#ifndef FSG_NO_INTR_EP
+
+static struct usb_endpoint_descriptor
+fsg_hs_intr_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	/* bEndpointAddress copied from fs_intr_in_desc during fsg_bind() */
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(2),
+	.bInterval =		9,	/* 2**(9-1) = 256 uframes -> 32 ms */
+};
+
+#ifndef FSG_NO_OTG
+#  define FSG_HS_FUNCTION_PRE_EP_ENTRIES	2
+#else
+#  define FSG_HS_FUNCTION_PRE_EP_ENTRIES	1
+#endif
+
+#endif
+
+static struct usb_descriptor_header *fsg_hs_function[] = {
+#ifndef FSG_NO_OTG
+	(struct usb_descriptor_header *) &fsg_otg_desc,
+#endif
+	(struct usb_descriptor_header *) &fsg_intf_desc,
+	(struct usb_descriptor_header *) &fsg_hs_bulk_in_desc,
+	(struct usb_descriptor_header *) &fsg_hs_bulk_out_desc,
+#ifndef FSG_NO_INTR_EP
+	(struct usb_descriptor_header *) &fsg_hs_intr_in_desc,
+#endif
+	NULL,
+};
+
+/* Maxpacket and other transfer characteristics vary by speed. */
+static struct usb_endpoint_descriptor *
+fsg_ep_desc(struct usb_gadget *g, struct usb_endpoint_descriptor *fs,
+		struct usb_endpoint_descriptor *hs)
+{
+	if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+		return hs;
+	return fs;
+}
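+
+/*
+ * Typical use (a sketch; ep and rc are the caller's): pick the
+ * descriptor matching the negotiated speed before enabling an
+ * endpoint:
+ *
+ *	d = fsg_ep_desc(gadget, &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
+ *	rc = usb_ep_enable(ep, d);
+ */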
+
+
+/* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */
+static struct usb_string		fsg_strings[] = {
+#ifndef FSG_NO_DEVICE_STRINGS
+	{FSG_STRING_MANUFACTURER,	fsg_string_manufacturer},
+	{FSG_STRING_PRODUCT,		fsg_string_product},
+	{FSG_STRING_SERIAL,		fsg_string_serial},
+	{FSG_STRING_CONFIG,		fsg_string_config},
+#endif
+	{FSG_STRING_INTERFACE,		fsg_string_interface},
+	{}
+};
+
+static struct usb_gadget_strings	fsg_stringtab = {
+	.language	= 0x0409,		/* en-us */
+	.strings	= fsg_strings,
+};
+
+
+/*-------------------------------------------------------------------------*/
+
+/* If the next two routines are called while the gadget is registered,
+ * the caller must own fsg->filesem for writing. */
+
+static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
+{
+	int				ro;
+	struct file			*filp = NULL;
+	int				rc = -EINVAL;
+	struct inode			*inode = NULL;
+	loff_t				size;
+	loff_t				num_sectors;
+	loff_t				min_sectors;
+
+	/* R/W if we can, R/O if we must */
+	ro = curlun->initially_ro;
+	if (!ro) {
+		filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0);
+		if (-EROFS == PTR_ERR(filp))
+			ro = 1;
+	}
+	if (ro)
+		filp = filp_open(filename, O_RDONLY | O_LARGEFILE, 0);
+	if (IS_ERR(filp)) {
+		LINFO(curlun, "unable to open backing file: %s\n", filename);
+		return PTR_ERR(filp);
+	}
+
+	if (!(filp->f_mode & FMODE_WRITE))
+		ro = 1;
+
+	if (filp->f_path.dentry)
+		inode = filp->f_path.dentry->d_inode;
+	if (inode && S_ISBLK(inode->i_mode)) {
+		if (bdev_read_only(inode->i_bdev))
+			ro = 1;
+	} else if (!inode || !S_ISREG(inode->i_mode)) {
+		LINFO(curlun, "invalid file type: %s\n", filename);
+		goto out;
+	}
+
+	/* If we can't read the file, it's no good.
+	 * If we can't write the file, use it read-only. */
+	if (!filp->f_op || !(filp->f_op->read || filp->f_op->aio_read)) {
+		LINFO(curlun, "file not readable: %s\n", filename);
+		goto out;
+	}
+	if (!(filp->f_op->write || filp->f_op->aio_write))
+		ro = 1;
+
+	size = i_size_read(inode->i_mapping->host);
+	if (size < 0) {
+		LINFO(curlun, "unable to find file size: %s\n", filename);
+		rc = (int) size;
+		goto out;
+	}
+	num_sectors = size >> 9;	/* File size in 512-byte blocks */
+	min_sectors = 1;
+	if (curlun->cdrom) {
+		num_sectors &= ~3;	/* Round down to a multiple of 4 sectors (2048 bytes) */
+		min_sectors = 300*4;	/* Smallest track is 300 frames */
+		if (num_sectors >= 256*60*75*4) {
+			num_sectors = (256*60*75 - 1) * 4;
+			LINFO(curlun, "file too big: %s\n", filename);
+			LINFO(curlun, "using only first %d blocks\n",
+					(int) num_sectors);
+		}
+	}
+	if (num_sectors < min_sectors) {
+		LINFO(curlun, "file too small: %s\n", filename);
+		rc = -ETOOSMALL;
+		goto out;
+	}
+
+	get_file(filp);
+	curlun->ro = ro;
+	curlun->filp = filp;
+	curlun->file_length = size;
+	curlun->num_sectors = num_sectors;
+	LDBG(curlun, "open backing file: %s\n", filename);
+	rc = 0;
+
+out:
+	filp_close(filp, current->files);
+	return rc;
+}
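+
+/*
+ * Note on reference counting above: on success get_file() takes an
+ * extra reference which is stashed in curlun->filp, so the
+ * unconditional filp_close() at "out:" only drops the reference
+ * acquired by filp_open(); fsg_lun_close() below fput()s the one we
+ * kept.
+ */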
+
+
+static void fsg_lun_close(struct fsg_lun *curlun)
+{
+	if (curlun->filp) {
+		LDBG(curlun, "close backing file\n");
+		fput(curlun->filp);
+		curlun->filp = NULL;
+	}
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Sync the file data, don't bother with the metadata.
+ * This code was copied from fs/buffer.c:sys_fdatasync(). */
+static int fsg_lun_fsync_sub(struct fsg_lun *curlun)
+{
+	struct file	*filp = curlun->filp;
+
+	if (curlun->ro || !filp)
+		return 0;
+	return vfs_fsync(filp, filp->f_path.dentry, 1);
+}
+
+static void store_cdrom_address(u8 *dest, int msf, u32 addr)
+{
+	if (msf) {
+		/* Convert to Minutes-Seconds-Frames */
+		addr >>= 2;		/* Convert to 2048-byte frames */
+		addr += 2*75;		/* Lead-in occupies 2 seconds */
+		dest[3] = addr % 75;	/* Frames */
+		addr /= 75;
+		dest[2] = addr % 60;	/* Seconds */
+		addr /= 60;
+		dest[1] = addr;		/* Minutes */
+		dest[0] = 0;		/* Reserved */
+	} else {
+		/* Absolute sector */
+		put_unaligned_be32(addr, dest);
+	}
+}
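+
+/*
+ * Worked example for the MSF branch above: logical block 0 maps to
+ * frame 0 plus the 2 second (2 * 75 frame) lead-in, i.e. 150 frames,
+ * giving dest[1..3] = 00:02:00 -- the conventional MSF address of the
+ * first data frame on a CD.
+ */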
+
+
+/*-------------------------------------------------------------------------*/
+
+
+static ssize_t fsg_show_ro(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct fsg_lun	*curlun = fsg_lun_from_dev(dev);
+
+	return sprintf(buf, "%d\n", fsg_lun_is_open(curlun)
+				  ? curlun->ro
+				  : curlun->initially_ro);
+}
+
+static ssize_t fsg_show_file(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct fsg_lun	*curlun = fsg_lun_from_dev(dev);
+	struct rw_semaphore	*filesem = dev_get_drvdata(dev);
+	char		*p;
+	ssize_t		rc;
+
+	down_read(filesem);
+	if (fsg_lun_is_open(curlun)) {	/* Get the complete pathname */
+		p = d_path(&curlun->filp->f_path, buf, PAGE_SIZE - 1);
+		if (IS_ERR(p))
+			rc = PTR_ERR(p);
+		else {
+			rc = strlen(p);
+			memmove(buf, p, rc);
+			buf[rc] = '\n';		/* Add a newline */
+			buf[++rc] = 0;
+		}
+	} else {				/* No file, return 0 bytes */
+		*buf = 0;
+		rc = 0;
+	}
+	up_read(filesem);
+	return rc;
+}
+
+
+static ssize_t fsg_store_ro(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	ssize_t		rc = count;
+	struct fsg_lun	*curlun = fsg_lun_from_dev(dev);
+	struct rw_semaphore	*filesem = dev_get_drvdata(dev);
+	int		i;
+
+	if (sscanf(buf, "%d", &i) != 1)
+		return -EINVAL;
+
+	/* Allow the write-enable status to change only while the backing file
+	 * is closed. */
+	down_read(filesem);
+	if (fsg_lun_is_open(curlun)) {
+		LDBG(curlun, "read-only status change prevented\n");
+		rc = -EBUSY;
+	} else {
+		curlun->ro = !!i;
+		curlun->initially_ro = !!i;
+		LDBG(curlun, "read-only status set to %d\n", curlun->ro);
+	}
+	up_read(filesem);
+	return rc;
+}
+
+static ssize_t fsg_store_file(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct fsg_lun	*curlun = fsg_lun_from_dev(dev);
+	struct rw_semaphore	*filesem = dev_get_drvdata(dev);
+	int		rc = 0;
+
+	if (curlun->prevent_medium_removal && fsg_lun_is_open(curlun)) {
+		LDBG(curlun, "eject attempt prevented\n");
+		return -EBUSY;				/* "Door is locked" */
+	}
+
+	/* Remove a trailing newline */
+	if (count > 0 && buf[count-1] == '\n')
+		((char *) buf)[count-1] = 0;		/* Ugh! */
+
+	/* Eject current medium */
+	down_write(filesem);
+	if (fsg_lun_is_open(curlun)) {
+		fsg_lun_close(curlun);
+		curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
+	}
+
+	/* Load new medium */
+	if (count > 0 && buf[0]) {
+		rc = fsg_lun_open(curlun, buf);
+		if (rc == 0)
+			curlun->unit_attention_data =
+					SS_NOT_READY_TO_READY_TRANSITION;
+	}
+	up_write(filesem);
+	return (rc < 0 ? rc : count);
+}
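+
+/*
+ * Illustrative sysfs usage of the two store routines above (the
+ * device path is hypothetical):
+ *
+ *	# echo 1 > /sys/devices/.../lunX/ro
+ *	# echo /root/backing.img > /sys/devices/.../lunX/file
+ */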
diff --git a/drivers/usb/gadget/u_ether.h b/drivers/usb/gadget/u_ether.h
index 91b39ff..fd55f45 100644
--- a/drivers/usb/gadget/u_ether.h
+++ b/drivers/usb/gadget/u_ether.h
@@ -112,7 +112,7 @@
 int ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
 int eem_bind_config(struct usb_configuration *c);
 
-#ifdef CONFIG_USB_ETH_RNDIS
+#ifdef USB_ETH_RNDIS
 
 int rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
 
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 9b43b22..2678a16 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -90,14 +90,25 @@
 
 config USB_EHCI_BIG_ENDIAN_MMIO
 	bool
-	depends on USB_EHCI_HCD && (PPC_CELLEB || PPC_PS3 || 440EPX || ARCH_IXP4XX)
+	depends on USB_EHCI_HCD && (PPC_CELLEB || PPC_PS3 || 440EPX || ARCH_IXP4XX || XPS_USB_HCD_XILINX)
 	default y
 
 config USB_EHCI_BIG_ENDIAN_DESC
 	bool
-	depends on USB_EHCI_HCD && (440EPX || ARCH_IXP4XX)
+	depends on USB_EHCI_HCD && (440EPX || ARCH_IXP4XX || XPS_USB_HCD_XILINX)
 	default y
 
+config XPS_USB_HCD_XILINX
+	bool "Use Xilinx usb host EHCI controller core"
+	depends on USB_EHCI_HCD && (PPC32 || MICROBLAZE)
+	select USB_EHCI_BIG_ENDIAN_DESC
+	select USB_EHCI_BIG_ENDIAN_MMIO
+	---help---
+		The Xilinx XPS USB host controller core is EHCI compliant
+		and has a built-in transaction translator.  It can be
+		configured to support both high speed and full speed
+		devices, or to support high speed devices only.
+
 config USB_EHCI_FSL
 	bool "Support for Freescale on-chip EHCI USB controller"
 	depends on USB_EHCI_HCD && FSL_SOC
@@ -105,6 +116,13 @@
 	---help---
 	  Variation of ARC USB block used in some Freescale chips.
 
+config USB_EHCI_MXC
+	bool "Support for Freescale i.MX on-chip EHCI USB controller"
+	depends on USB_EHCI_HCD && ARCH_MXC
+	select USB_EHCI_ROOT_HUB_TT
+	---help---
+	  Variation of ARC USB block used in some Freescale chips.
+
 config USB_EHCI_HCD_PPC_OF
 	bool "EHCI support for PPC USB controller on OF platform bus"
 	depends on USB_EHCI_HCD && PPC_OF
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index d8f4aaa..5859522 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -549,7 +549,7 @@
 	/* controllers may cache some of the periodic schedule ... */
 	hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
 	if (HCC_ISOC_CACHE(hcc_params))		// full frame cache
-		ehci->i_thresh = 8;
+		ehci->i_thresh = 2 + 8;
 	else					// N microframes cached
 		ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
 
@@ -605,6 +605,8 @@
 	}
 	ehci->command = temp;
 
+	/* Accept arbitrarily long scatter-gather lists */
+	hcd->self.sg_tablesize = ~0;
 	return 0;
 }
 
@@ -1105,11 +1107,21 @@
 #define	PLATFORM_DRIVER		ehci_fsl_driver
 #endif
 
+#ifdef CONFIG_USB_EHCI_MXC
+#include "ehci-mxc.c"
+#define PLATFORM_DRIVER		ehci_mxc_driver
+#endif
+
 #ifdef CONFIG_SOC_AU1200
 #include "ehci-au1xxx.c"
 #define	PLATFORM_DRIVER		ehci_hcd_au1xxx_driver
 #endif
 
+#ifdef CONFIG_ARCH_OMAP34XX
+#include "ehci-omap.c"
+#define        PLATFORM_DRIVER         ehci_hcd_omap_driver
+#endif
+
 #ifdef CONFIG_PPC_PS3
 #include "ehci-ps3.c"
 #define	PS3_SYSTEM_BUS_DRIVER	ps3_ehci_driver
@@ -1120,6 +1132,11 @@
 #define OF_PLATFORM_DRIVER	ehci_hcd_ppc_of_driver
 #endif
 
+#ifdef CONFIG_XPS_USB_HCD_XILINX
+#include "ehci-xilinx-of.c"
+#define OF_PLATFORM_DRIVER	ehci_hcd_xilinx_of_driver
+#endif
+
 #ifdef CONFIG_PLAT_ORION
 #include "ehci-orion.c"
 #define	PLATFORM_DRIVER		ehci_orion_driver
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 1b6f1c0..2c6571c 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -236,7 +236,7 @@
 	}
 
 	if (unlikely(ehci->debug)) {
-		if (ehci->debug && !dbgp_reset_prep())
+		if (!dbgp_reset_prep())
 			ehci->debug = NULL;
 		else
 			dbgp_external_startup();
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
new file mode 100644
index 0000000..35c56f4
--- /dev/null
+++ b/drivers/usb/host/ehci-mxc.c
@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+ * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/usb/otg.h>
+
+#include <mach/mxc_ehci.h>
+
+#define ULPI_VIEWPORT_OFFSET	0x170
+#define PORTSC_OFFSET		0x184
+#define USBMODE_OFFSET		0x1a8
+#define USBMODE_CM_HOST		3
+
+struct ehci_mxc_priv {
+	struct clk *usbclk, *ahbclk;
+	struct usb_hcd *hcd;
+};
+
+/* called during probe() after chip reset completes */
+static int ehci_mxc_setup(struct usb_hcd *hcd)
+{
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	int retval;
+
+	/* EHCI registers start at offset 0x100 */
+	ehci->caps = hcd->regs + 0x100;
+	ehci->regs = hcd->regs + 0x100 +
+	    HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
+	dbg_hcs_params(ehci, "reset");
+	dbg_hcc_params(ehci, "reset");
+
+	/* cache this readonly data; minimize chip reads */
+	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+
+	retval = ehci_halt(ehci);
+	if (retval)
+		return retval;
+
+	/* data structure init */
+	retval = ehci_init(hcd);
+	if (retval)
+		return retval;
+
+	hcd->has_tt = 1;
+
+	ehci->sbrn = 0x20;
+
+	ehci_reset(ehci);
+
+	ehci_port_power(ehci, 0);
+	return 0;
+}
+
+static const struct hc_driver ehci_mxc_hc_driver = {
+	.description = hcd_name,
+	.product_desc = "Freescale On-Chip EHCI Host Controller",
+	.hcd_priv_size = sizeof(struct ehci_hcd),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq = ehci_irq,
+	.flags = HCD_USB2 | HCD_MEMORY,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.reset = ehci_mxc_setup,
+	.start = ehci_run,
+	.stop = ehci_stop,
+	.shutdown = ehci_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue = ehci_urb_enqueue,
+	.urb_dequeue = ehci_urb_dequeue,
+	.endpoint_disable = ehci_endpoint_disable,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number = ehci_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_status_data = ehci_hub_status_data,
+	.hub_control = ehci_hub_control,
+	.bus_suspend = ehci_bus_suspend,
+	.bus_resume = ehci_bus_resume,
+	.relinquish_port = ehci_relinquish_port,
+	.port_handed_over = ehci_port_handed_over,
+};
+
+static int ehci_mxc_drv_probe(struct platform_device *pdev)
+{
+	struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data;
+	struct usb_hcd *hcd;
+	struct resource *res;
+	int irq, ret, temp;
+	struct ehci_mxc_priv *priv;
+	struct device *dev = &pdev->dev;
+
+	dev_info(&pdev->dev, "initializing i.MX USB Controller\n");
+
+	if (!pdata) {
+		dev_err(dev, "No platform data given, bailing out.\n");
+		return -EINVAL;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+
+	hcd = usb_create_hcd(&ehci_mxc_hc_driver, dev, dev_name(dev));
+	if (!hcd)
+		return -ENOMEM;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		ret = -ENOMEM;
+		goto err_alloc;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "Found HC with no register addr. Check setup!\n");
+		ret = -ENODEV;
+		goto err_get_resource;
+	}
+
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+
+	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
+		dev_dbg(dev, "controller already in use\n");
+		ret = -EBUSY;
+		goto err_request_mem;
+	}
+
+	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+	if (!hcd->regs) {
+		dev_err(dev, "error mapping memory\n");
+		ret = -EFAULT;
+		goto err_ioremap;
+	}
+
+	/* enable clocks */
+	priv->usbclk = clk_get(dev, "usb");
+	if (IS_ERR(priv->usbclk)) {
+		ret = PTR_ERR(priv->usbclk);
+		goto err_clk;
+	}
+	clk_enable(priv->usbclk);
+
+	if (!cpu_is_mx35()) {
+		priv->ahbclk = clk_get(dev, "usb_ahb");
+		if (IS_ERR(priv->ahbclk)) {
+			ret = PTR_ERR(priv->ahbclk);
+			goto err_clk_ahb;
+		}
+		clk_enable(priv->ahbclk);
+	}
+
+	/* set USBMODE to host mode */
+	temp = readl(hcd->regs + USBMODE_OFFSET);
+	writel(temp | USBMODE_CM_HOST, hcd->regs + USBMODE_OFFSET);
+
+	/* set up the PORTSCx register */
+	writel(pdata->portsc, hcd->regs + PORTSC_OFFSET);
+	mdelay(10);
+
+	/* setup USBCONTROL. */
+	ret = mxc_set_usbcontrol(pdev->id, pdata->flags);
+	if (ret < 0)
+		goto err_init;
+
+	/* call platform specific init function */
+	if (pdata->init) {
+		ret = pdata->init(pdev);
+		if (ret) {
+			dev_err(dev, "platform init failed\n");
+			goto err_init;
+		}
+	}
+
+	/* most platforms need some time to settle changed IO settings */
+	mdelay(10);
+
+	/* Initialize the transceiver */
+	if (pdata->otg) {
+		pdata->otg->io_priv = hcd->regs + ULPI_VIEWPORT_OFFSET;
+		if (otg_init(pdata->otg) != 0)
+			dev_err(dev, "unable to init transceiver\n");
+		else if (otg_set_vbus(pdata->otg, 1) != 0)
+			dev_err(dev, "unable to enable vbus on transceiver\n");
+	}
+
+	priv->hcd = hcd;
+	platform_set_drvdata(pdev, priv);
+
+	ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+	if (ret)
+		goto err_add;
+
+	return 0;
+
+err_add:
+	if (pdata && pdata->exit)
+		pdata->exit(pdev);
+err_init:
+	if (priv->ahbclk) {
+		clk_disable(priv->ahbclk);
+		clk_put(priv->ahbclk);
+	}
+err_clk_ahb:
+	clk_disable(priv->usbclk);
+	clk_put(priv->usbclk);
+err_clk:
+	iounmap(hcd->regs);
+err_ioremap:
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err_request_mem:
+err_get_resource:
+	kfree(priv);
+err_alloc:
+	usb_put_hcd(hcd);
+	return ret;
+}
+
+static int __exit ehci_mxc_drv_remove(struct platform_device *pdev)
+{
+	struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data;
+	struct ehci_mxc_priv *priv = platform_get_drvdata(pdev);
+	struct usb_hcd *hcd = priv->hcd;
+
+	if (pdata && pdata->exit)
+		pdata->exit(pdev);
+
+	if (pdata && pdata->otg)
+		otg_shutdown(pdata->otg);
+
+	usb_remove_hcd(hcd);
+	iounmap(hcd->regs);
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+	usb_put_hcd(hcd);
+	platform_set_drvdata(pdev, NULL);
+
+	clk_disable(priv->usbclk);
+	clk_put(priv->usbclk);
+	if (priv->ahbclk) {
+		clk_disable(priv->ahbclk);
+		clk_put(priv->ahbclk);
+	}
+
+	kfree(priv);
+
+	return 0;
+}
+
+static void ehci_mxc_drv_shutdown(struct platform_device *pdev)
+{
+	struct ehci_mxc_priv *priv = platform_get_drvdata(pdev);
+	struct usb_hcd *hcd = priv->hcd;
+
+	if (hcd->driver->shutdown)
+		hcd->driver->shutdown(hcd);
+}
+
+MODULE_ALIAS("platform:mxc-ehci");
+
+static struct platform_driver ehci_mxc_driver = {
+	.probe = ehci_mxc_drv_probe,
+	.remove = __exit_p(ehci_mxc_drv_remove),
+	.shutdown = ehci_mxc_drv_shutdown,
+	.driver = {
+		   .name = "mxc-ehci",
+	},
+};
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
new file mode 100644
index 0000000..12f1ad2
--- /dev/null
+++ b/drivers/usb/host/ehci-omap.c
@@ -0,0 +1,756 @@
+/*
+ * ehci-omap.c - driver for USBHOST on OMAP 34xx processor
+ *
+ * Bus Glue for OMAP34xx USBHOST 3 port EHCI controller
+ * Tested on OMAP3430 ES2.0 SDP
+ *
+ * Copyright (C) 2007-2008 Texas Instruments, Inc.
+ *	Author: Vikram Pandita <vikram.pandita@ti.com>
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ *	Contact: Felipe Balbi <felipe.balbi@nokia.com>
+ *
+ * Based on "ehci-fsl.c" and "ehci-au1xxx.c" ehci glue layers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ *
+ * TODO (last updated Feb 23rd, 2009):
+ *	- add kernel-doc
+ *	- enable AUTOIDLE
+ *	- move DPLL5 programming to clock fw
+ *	- add suspend/resume
+ *	- move workarounds to board-files
+ */
+
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <mach/usb.h>
+
+/*
+ * OMAP USBHOST Register addresses: VIRTUAL ADDRESSES
+ *	Use ehci_omap_readl()/ehci_omap_writel() functions
+ */
+
+/* TLL Register Set */
+#define	OMAP_USBTLL_REVISION				(0x00)
+#define	OMAP_USBTLL_SYSCONFIG				(0x10)
+#define	OMAP_USBTLL_SYSCONFIG_CACTIVITY			(1 << 8)
+#define	OMAP_USBTLL_SYSCONFIG_SIDLEMODE			(1 << 3)
+#define	OMAP_USBTLL_SYSCONFIG_ENAWAKEUP			(1 << 2)
+#define	OMAP_USBTLL_SYSCONFIG_SOFTRESET			(1 << 1)
+#define	OMAP_USBTLL_SYSCONFIG_AUTOIDLE			(1 << 0)
+
+#define	OMAP_USBTLL_SYSSTATUS				(0x14)
+#define	OMAP_USBTLL_SYSSTATUS_RESETDONE			(1 << 0)
+
+#define	OMAP_USBTLL_IRQSTATUS				(0x18)
+#define	OMAP_USBTLL_IRQENABLE				(0x1C)
+
+#define	OMAP_TLL_SHARED_CONF				(0x30)
+#define	OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN		(1 << 6)
+#define	OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN		(1 << 5)
+#define	OMAP_TLL_SHARED_CONF_USB_DIVRATION		(1 << 2)
+#define	OMAP_TLL_SHARED_CONF_FCLK_REQ			(1 << 1)
+#define	OMAP_TLL_SHARED_CONF_FCLK_IS_ON			(1 << 0)
+
+#define	OMAP_TLL_CHANNEL_CONF(num)			(0x040 + 0x004 * num)
+#define	OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF		(1 << 11)
+#define	OMAP_TLL_CHANNEL_CONF_ULPI_ULPIAUTOIDLE		(1 << 10)
+#define	OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE		(1 << 9)
+#define	OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE		(1 << 8)
+#define	OMAP_TLL_CHANNEL_CONF_CHANEN			(1 << 0)
+
+#define	OMAP_TLL_ULPI_FUNCTION_CTRL(num)		(0x804 + 0x100 * num)
+#define	OMAP_TLL_ULPI_INTERFACE_CTRL(num)		(0x807 + 0x100 * num)
+#define	OMAP_TLL_ULPI_OTG_CTRL(num)			(0x80A + 0x100 * num)
+#define	OMAP_TLL_ULPI_INT_EN_RISE(num)			(0x80D + 0x100 * num)
+#define	OMAP_TLL_ULPI_INT_EN_FALL(num)			(0x810 + 0x100 * num)
+#define	OMAP_TLL_ULPI_INT_STATUS(num)			(0x813 + 0x100 * num)
+#define	OMAP_TLL_ULPI_INT_LATCH(num)			(0x814 + 0x100 * num)
+#define	OMAP_TLL_ULPI_DEBUG(num)			(0x815 + 0x100 * num)
+#define	OMAP_TLL_ULPI_SCRATCH_REGISTER(num)		(0x816 + 0x100 * num)
+
+#define OMAP_TLL_CHANNEL_COUNT				3
+#define OMAP_TLL_CHANNEL_1_EN_MASK			(1 << 1)
+#define OMAP_TLL_CHANNEL_2_EN_MASK			(1 << 2)
+#define OMAP_TLL_CHANNEL_3_EN_MASK			(1 << 4)
+
+/* UHH Register Set */
+#define	OMAP_UHH_REVISION				(0x00)
+#define	OMAP_UHH_SYSCONFIG				(0x10)
+#define	OMAP_UHH_SYSCONFIG_MIDLEMODE			(1 << 12)
+#define	OMAP_UHH_SYSCONFIG_CACTIVITY			(1 << 8)
+#define	OMAP_UHH_SYSCONFIG_SIDLEMODE			(1 << 3)
+#define	OMAP_UHH_SYSCONFIG_ENAWAKEUP			(1 << 2)
+#define	OMAP_UHH_SYSCONFIG_SOFTRESET			(1 << 1)
+#define	OMAP_UHH_SYSCONFIG_AUTOIDLE			(1 << 0)
+
+#define	OMAP_UHH_SYSSTATUS				(0x14)
+#define	OMAP_UHH_HOSTCONFIG				(0x40)
+#define	OMAP_UHH_HOSTCONFIG_ULPI_BYPASS			(1 << 0)
+#define	OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS		(1 << 0)
+#define	OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS		(1 << 11)
+#define	OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS		(1 << 12)
+#define OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN		(1 << 2)
+#define OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN		(1 << 3)
+#define OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN		(1 << 4)
+#define OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN		(1 << 5)
+#define OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS		(1 << 8)
+#define OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS		(1 << 9)
+#define OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS		(1 << 10)
+
+#define	OMAP_UHH_DEBUG_CSR				(0x44)
+
+/* EHCI Register Set */
+#define	EHCI_INSNREG05_ULPI				(0xA4)
+#define	EHCI_INSNREG05_ULPI_CONTROL_SHIFT		31
+#define	EHCI_INSNREG05_ULPI_PORTSEL_SHIFT		24
+#define	EHCI_INSNREG05_ULPI_OPSEL_SHIFT			22
+#define	EHCI_INSNREG05_ULPI_REGADD_SHIFT		16
+#define	EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT		8
+#define	EHCI_INSNREG05_ULPI_WRDATA_SHIFT		0
+
+/*-------------------------------------------------------------------------*/
+
+static inline void ehci_omap_writel(void __iomem *base, u32 reg, u32 val)
+{
+	__raw_writel(val, base + reg);
+}
+
+static inline u32 ehci_omap_readl(void __iomem *base, u32 reg)
+{
+	return __raw_readl(base + reg);
+}
+
+static inline void ehci_omap_writeb(void __iomem *base, u8 reg, u8 val)
+{
+	__raw_writeb(val, base + reg);
+}
+
+static inline u8 ehci_omap_readb(void __iomem *base, u8 reg)
+{
+	return __raw_readb(base + reg);
+}
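+
+/*
+ * These helpers keep all USBHOST register accesses explicit; a
+ * typical read-modify-write then looks like (as in omap_start_ehc()
+ * below):
+ *
+ *	reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSCONFIG);
+ *	reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
+ *	ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
+ */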
+
+/*-------------------------------------------------------------------------*/
+
+struct ehci_hcd_omap {
+	struct ehci_hcd		*ehci;
+	struct device		*dev;
+
+	struct clk		*usbhost_ick;
+	struct clk		*usbhost2_120m_fck;
+	struct clk		*usbhost1_48m_fck;
+	struct clk		*usbtll_fck;
+	struct clk		*usbtll_ick;
+
+	/* FIXME the following two workarounds are
+	 * board specific not silicon-specific so these
+	 * should be moved to board-file instead.
+	 *
+	 * Maybe someone from TI will know better which
+	 * board is affected and needs the workarounds
+	 * to be applied
+	 */
+
+	/* gpio for resetting phy */
+	int			reset_gpio_port[OMAP3_HS_USB_PORTS];
+
+	/* phy reset workaround */
+	int			phy_reset;
+
+	/* desired phy_mode: TLL, PHY */
+	enum ehci_hcd_omap_mode	port_mode[OMAP3_HS_USB_PORTS];
+
+	void __iomem		*uhh_base;
+	void __iomem		*tll_base;
+	void __iomem		*ehci_base;
+};
+
+/*-------------------------------------------------------------------------*/
+
+static void omap_usb_utmi_init(struct ehci_hcd_omap *omap, u8 tll_channel_mask)
+{
+	unsigned reg;
+	int i;
+
+	/* Program the 3 TLL channels upfront */
+	for (i = 0; i < OMAP_TLL_CHANNEL_COUNT; i++) {
+		reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i));
+
+		/* Disable AutoIdle, BitStuffing and use SDR Mode */
+		reg &= ~(OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE
+				| OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
+				| OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
+		ehci_omap_writel(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i), reg);
+	}
+
+	/* Program Common TLL register */
+	reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_SHARED_CONF);
+	reg |= (OMAP_TLL_SHARED_CONF_FCLK_IS_ON
+			| OMAP_TLL_SHARED_CONF_USB_DIVRATION
+			| OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN);
+	reg &= ~OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN;
+
+	ehci_omap_writel(omap->tll_base, OMAP_TLL_SHARED_CONF, reg);
+
+	/* Enable channels now */
+	for (i = 0; i < OMAP_TLL_CHANNEL_COUNT; i++) {
+		reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i));
+
+		/* Enable only the reg that is needed */
+		if (!(tll_channel_mask & 1<<i))
+			continue;
+
+		reg |= OMAP_TLL_CHANNEL_CONF_CHANEN;
+		ehci_omap_writel(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i), reg);
+
+		ehci_omap_writeb(omap->tll_base,
+				OMAP_TLL_ULPI_SCRATCH_REGISTER(i), 0xbe);
+		dev_dbg(omap->dev, "ULPI_SCRATCH_REG[ch=%d]= 0x%02x\n",
+				i+1, ehci_omap_readb(omap->tll_base,
+				OMAP_TLL_ULPI_SCRATCH_REGISTER(i)));
+	}
+}
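+
+/*
+ * The 0xbe written to each enabled channel's ULPI scratch register
+ * above is just a probe value: reading it back (see the dev_dbg())
+ * confirms that register accesses actually reach the channel.
+ */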
+
+/*-------------------------------------------------------------------------*/
+
+/* omap_start_ehc
+ *	- Start the TI USBHOST controller
+ */
+static int omap_start_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+	u8 tll_ch_mask = 0;
+	unsigned reg = 0;
+	int ret = 0;
+
+	dev_dbg(omap->dev, "starting TI EHCI USB Controller\n");
+
+	/* Enable Clocks for USBHOST */
+	omap->usbhost_ick = clk_get(omap->dev, "usbhost_ick");
+	if (IS_ERR(omap->usbhost_ick)) {
+		ret =  PTR_ERR(omap->usbhost_ick);
+		goto err_host_ick;
+	}
+	clk_enable(omap->usbhost_ick);
+
+	omap->usbhost2_120m_fck = clk_get(omap->dev, "usbhost_120m_fck");
+	if (IS_ERR(omap->usbhost2_120m_fck)) {
+		ret = PTR_ERR(omap->usbhost2_120m_fck);
+		goto err_host_120m_fck;
+	}
+	clk_enable(omap->usbhost2_120m_fck);
+
+	omap->usbhost1_48m_fck = clk_get(omap->dev, "usbhost_48m_fck");
+	if (IS_ERR(omap->usbhost1_48m_fck)) {
+		ret = PTR_ERR(omap->usbhost1_48m_fck);
+		goto err_host_48m_fck;
+	}
+	clk_enable(omap->usbhost1_48m_fck);
+
+	if (omap->phy_reset) {
+		/* Refer: ISSUE1 */
+		if (gpio_is_valid(omap->reset_gpio_port[0])) {
+			gpio_request(omap->reset_gpio_port[0],
+						"USB1 PHY reset");
+			gpio_direction_output(omap->reset_gpio_port[0], 0);
+		}
+
+		if (gpio_is_valid(omap->reset_gpio_port[1])) {
+			gpio_request(omap->reset_gpio_port[1],
+						"USB2 PHY reset");
+			gpio_direction_output(omap->reset_gpio_port[1], 0);
+		}
+
+		/* Hold the PHY in RESET long enough for DIR to go high */
+		udelay(10);
+	}
+
+	/* Configure TLL for 60Mhz clk for ULPI */
+	omap->usbtll_fck = clk_get(omap->dev, "usbtll_fck");
+	if (IS_ERR(omap->usbtll_fck)) {
+		ret = PTR_ERR(omap->usbtll_fck);
+		goto err_tll_fck;
+	}
+	clk_enable(omap->usbtll_fck);
+
+	omap->usbtll_ick = clk_get(omap->dev, "usbtll_ick");
+	if (IS_ERR(omap->usbtll_ick)) {
+		ret = PTR_ERR(omap->usbtll_ick);
+		goto err_tll_ick;
+	}
+	clk_enable(omap->usbtll_ick);
+
+	/* perform TLL soft reset, and wait until reset is complete */
+	ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
+			OMAP_USBTLL_SYSCONFIG_SOFTRESET);
+
+	/* Wait for TLL reset to complete */
+	while (!(ehci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
+			& OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
+		cpu_relax();
+
+		if (time_after(jiffies, timeout)) {
+			dev_dbg(omap->dev, "operation timed out\n");
+			ret = -EINVAL;
+			goto err_sys_status;
+		}
+	}
+
+	dev_dbg(omap->dev, "TLL RESET DONE\n");
+
+	/* (1 << 3) = no-idle mode; only for initial debugging */
+	ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
+			OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
+			OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
+			OMAP_USBTLL_SYSCONFIG_CACTIVITY);
+
+
+	/* Put UHH in NoIdle/NoStandby mode */
+	reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSCONFIG);
+	reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
+			| OMAP_UHH_SYSCONFIG_SIDLEMODE
+			| OMAP_UHH_SYSCONFIG_CACTIVITY
+			| OMAP_UHH_SYSCONFIG_MIDLEMODE);
+	reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
+
+	ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
+
+	reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
+
+	/* setup ULPI bypass and burst configurations */
+	reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN
+			| OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN
+			| OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN);
+	reg &= ~OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN;
+
+	if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_UNKNOWN)
+		reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS;
+	if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_UNKNOWN)
+		reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS;
+	if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_UNKNOWN)
+		reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
+
+	/* Bypass the TLL module for PHY mode operation */
+	if (omap_rev() <= OMAP3430_REV_ES2_1) {
+		dev_dbg(omap->dev, "OMAP3 ES version <= ES2.1\n");
+		if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY) ||
+			(omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY) ||
+				(omap->port_mode[2] == EHCI_HCD_OMAP_MODE_PHY))
+			reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
+		else
+			reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
+	} else {
+		dev_dbg(omap->dev, "OMAP3 ES version > ES2.1\n");
+		if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY)
+			reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
+		else if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL)
+			reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
+
+		if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY)
+			reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
+		else if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL)
+			reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
+
+		if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_PHY)
+			reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
+		else if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)
+			reg |= OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
+
+	}
+	ehci_omap_writel(omap->uhh_base, OMAP_UHH_HOSTCONFIG, reg);
+	dev_dbg(omap->dev, "UHH setup done, uhh_hostconfig=%x\n", reg);
+
+
+	if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL) ||
+		(omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL) ||
+			(omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)) {
+
+		if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL)
+			tll_ch_mask |= OMAP_TLL_CHANNEL_1_EN_MASK;
+		if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL)
+			tll_ch_mask |= OMAP_TLL_CHANNEL_2_EN_MASK;
+		if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)
+			tll_ch_mask |= OMAP_TLL_CHANNEL_3_EN_MASK;
+
+		/* Enable UTMI mode for required TLL channels */
+		omap_usb_utmi_init(omap, tll_ch_mask);
+	}
+
+	if (omap->phy_reset) {
+		/* Refer ISSUE1:
+		 * Hold the PHY in RESET long enough for it to
+		 * settle and become ready.
+		 */
+		udelay(10);
+
+		if (gpio_is_valid(omap->reset_gpio_port[0]))
+			gpio_set_value(omap->reset_gpio_port[0], 1);
+
+		if (gpio_is_valid(omap->reset_gpio_port[1]))
+			gpio_set_value(omap->reset_gpio_port[1], 1);
+	}
+
+	return 0;
+
+err_sys_status:
+	clk_disable(omap->usbtll_ick);
+	clk_put(omap->usbtll_ick);
+
+err_tll_ick:
+	clk_disable(omap->usbtll_fck);
+	clk_put(omap->usbtll_fck);
+
+err_tll_fck:
+	clk_disable(omap->usbhost1_48m_fck);
+	clk_put(omap->usbhost1_48m_fck);
+
+	if (omap->phy_reset) {
+		if (gpio_is_valid(omap->reset_gpio_port[0]))
+			gpio_free(omap->reset_gpio_port[0]);
+
+		if (gpio_is_valid(omap->reset_gpio_port[1]))
+			gpio_free(omap->reset_gpio_port[1]);
+	}
+
+err_host_48m_fck:
+	clk_disable(omap->usbhost2_120m_fck);
+	clk_put(omap->usbhost2_120m_fck);
+
+err_host_120m_fck:
+	clk_disable(omap->usbhost_ick);
+	clk_put(omap->usbhost_ick);
+
+err_host_ick:
+	return ret;
+}
+
+static void omap_stop_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(100);
+
+	dev_dbg(omap->dev, "stopping TI EHCI USB Controller\n");
+
+	/* Reset OMAP modules for insmod/rmmod to work */
+	ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG,
+			OMAP_UHH_SYSCONFIG_SOFTRESET);
+	while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
+				& (1 << 0))) {
+		cpu_relax();
+
+		if (time_after(jiffies, timeout))
+			dev_dbg(omap->dev, "operation timed out\n");
+	}
+
+	while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
+				& (1 << 1))) {
+		cpu_relax();
+
+		if (time_after(jiffies, timeout))
+			dev_dbg(omap->dev, "operation timed out\n");
+	}
+
+	while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
+				& (1 << 2))) {
+		cpu_relax();
+
+		if (time_after(jiffies, timeout))
+			dev_dbg(omap->dev, "operation timed out\n");
+	}
+
+	ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG, (1 << 1));
+
+	while (!(ehci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
+				& (1 << 0))) {
+		cpu_relax();
+
+		if (time_after(jiffies, timeout))
+			dev_dbg(omap->dev, "operation timed out\n");
+	}
+
+	if (omap->usbtll_fck != NULL) {
+		clk_disable(omap->usbtll_fck);
+		clk_put(omap->usbtll_fck);
+		omap->usbtll_fck = NULL;
+	}
+
+	if (omap->usbhost_ick != NULL) {
+		clk_disable(omap->usbhost_ick);
+		clk_put(omap->usbhost_ick);
+		omap->usbhost_ick = NULL;
+	}
+
+	if (omap->usbhost1_48m_fck != NULL) {
+		clk_disable(omap->usbhost1_48m_fck);
+		clk_put(omap->usbhost1_48m_fck);
+		omap->usbhost1_48m_fck = NULL;
+	}
+
+	if (omap->usbhost2_120m_fck != NULL) {
+		clk_disable(omap->usbhost2_120m_fck);
+		clk_put(omap->usbhost2_120m_fck);
+		omap->usbhost2_120m_fck = NULL;
+	}
+
+	if (omap->usbtll_ick != NULL) {
+		clk_disable(omap->usbtll_ick);
+		clk_put(omap->usbtll_ick);
+		omap->usbtll_ick = NULL;
+	}
+
+	if (omap->phy_reset) {
+		if (gpio_is_valid(omap->reset_gpio_port[0]))
+			gpio_free(omap->reset_gpio_port[0]);
+
+		if (gpio_is_valid(omap->reset_gpio_port[1]))
+			gpio_free(omap->reset_gpio_port[1]);
+	}
+
+	dev_dbg(omap->dev, "Clock to USB host has been disabled\n");
+}
+
+/*-------------------------------------------------------------------------*/
+
+static const struct hc_driver ehci_omap_hc_driver;
+
+/* configure so an HC device and id are always provided */
+/* always called with process context; sleeping is OK */
+
+/**
+ * ehci_hcd_omap_probe - initialize TI-based HCDs
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ */
+static int ehci_hcd_omap_probe(struct platform_device *pdev)
+{
+	struct ehci_hcd_omap_platform_data *pdata = pdev->dev.platform_data;
+	struct ehci_hcd_omap *omap;
+	struct resource *res;
+	struct usb_hcd *hcd;
+
+	int irq = platform_get_irq(pdev, 0);
+	int ret = -ENODEV;
+
+	if (!pdata) {
+		dev_dbg(&pdev->dev, "missing platform_data\n");
+		goto err_pdata;
+	}
+
+	if (usb_disabled())
+		goto err_disabled;
+
+	omap = kzalloc(sizeof(*omap), GFP_KERNEL);
+	if (!omap) {
+		ret = -ENOMEM;
+		goto err_disabled;
+	}
+
+	hcd = usb_create_hcd(&ehci_omap_hc_driver, &pdev->dev,
+			dev_name(&pdev->dev));
+	if (!hcd) {
+		dev_dbg(&pdev->dev, "failed to create hcd with err %d\n", ret);
+		ret = -ENOMEM;
+		goto err_create_hcd;
+	}
+
+	platform_set_drvdata(pdev, omap);
+	omap->dev		= &pdev->dev;
+	omap->phy_reset		= pdata->phy_reset;
+	omap->reset_gpio_port[0]	= pdata->reset_gpio_port[0];
+	omap->reset_gpio_port[1]	= pdata->reset_gpio_port[1];
+	omap->reset_gpio_port[2]	= pdata->reset_gpio_port[2];
+	omap->port_mode[0]		= pdata->port_mode[0];
+	omap->port_mode[1]		= pdata->port_mode[1];
+	omap->port_mode[2]		= pdata->port_mode[2];
+	omap->ehci		= hcd_to_ehci(hcd);
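+	/* Serial Bus Release Number: 0x20 is BCD for USB 2.0 */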
+	omap->ehci->sbrn	= 0x20;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+
+	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+	if (!hcd->regs) {
+		dev_err(&pdev->dev, "EHCI ioremap failed\n");
+		ret = -ENOMEM;
+		goto err_ioremap;
+	}
+
+	/* we know this is the memory we want, no need to ioremap again */
+	omap->ehci->caps = hcd->regs;
+	omap->ehci_base = hcd->regs;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	omap->uhh_base = ioremap(res->start, resource_size(res));
+	if (!omap->uhh_base) {
+		dev_err(&pdev->dev, "UHH ioremap failed\n");
+		ret = -ENOMEM;
+		goto err_uhh_ioremap;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	omap->tll_base = ioremap(res->start, resource_size(res));
+	if (!omap->tll_base) {
+		dev_err(&pdev->dev, "TLL ioremap failed\n");
+		ret = -ENOMEM;
+		goto err_tll_ioremap;
+	}
+
+	ret = omap_start_ehc(omap, hcd);
+	if (ret) {
+		dev_dbg(&pdev->dev, "failed to start ehci\n");
+		goto err_start;
+	}
+
+	omap->ehci->regs = hcd->regs
+		+ HC_LENGTH(readl(&omap->ehci->caps->hc_capbase));
+
+	/* cache this readonly data; minimize chip reads */
+	omap->ehci->hcs_params = readl(&omap->ehci->caps->hcs_params);
+
+	/* Set the interrupt threshold control to 1 micro-frame */
+	writel(readl(&omap->ehci->regs->command) | (1 << 16),
+			&omap->ehci->regs->command);
+
+	ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+	if (ret) {
+		dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
+		goto err_add_hcd;
+	}
+
+	return 0;
+
+err_add_hcd:
+	omap_stop_ehc(omap, hcd);
+
+err_start:
+	iounmap(omap->tll_base);
+
+err_tll_ioremap:
+	iounmap(omap->uhh_base);
+
+err_uhh_ioremap:
+	iounmap(hcd->regs);
+
+err_ioremap:
+	usb_put_hcd(hcd);
+
+err_create_hcd:
+	kfree(omap);
+err_disabled:
+err_pdata:
+	return ret;
+}
+
+/* may be called without controller electrically present */
+/* may be called with controller, bus, and devices active */
+
+/**
+ * ehci_hcd_omap_remove - shutdown processing for EHCI HCDs
+ * @pdev: USB Host Controller being removed
+ *
+ * Reverses the effect of ehci_hcd_omap_probe(), first invoking
+ * the HCD's stop() method.  It is always called from a thread
+ * context, normally "rmmod", "apmd", or something similar.
+ */
+static int ehci_hcd_omap_remove(struct platform_device *pdev)
+{
+	struct ehci_hcd_omap *omap = platform_get_drvdata(pdev);
+	struct usb_hcd *hcd = ehci_to_hcd(omap->ehci);
+
+	usb_remove_hcd(hcd);
+	omap_stop_ehc(omap, hcd);
+	iounmap(hcd->regs);
+	iounmap(omap->tll_base);
+	iounmap(omap->uhh_base);
+	usb_put_hcd(hcd);
+
+	return 0;
+}
+
+static void ehci_hcd_omap_shutdown(struct platform_device *pdev)
+{
+	struct ehci_hcd_omap *omap = platform_get_drvdata(pdev);
+	struct usb_hcd *hcd = ehci_to_hcd(omap->ehci);
+
+	if (hcd->driver->shutdown)
+		hcd->driver->shutdown(hcd);
+}
+
+static struct platform_driver ehci_hcd_omap_driver = {
+	.probe			= ehci_hcd_omap_probe,
+	.remove			= ehci_hcd_omap_remove,
+	.shutdown		= ehci_hcd_omap_shutdown,
+	/*.suspend		= ehci_hcd_omap_suspend, */
+	/*.resume		= ehci_hcd_omap_resume, */
+	.driver = {
+		.name		= "ehci-omap",
+	}
+};
+
+/*-------------------------------------------------------------------------*/
+
+static const struct hc_driver ehci_omap_hc_driver = {
+	.description		= hcd_name,
+	.product_desc		= "OMAP-EHCI Host Controller",
+	.hcd_priv_size		= sizeof(struct ehci_hcd),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq			= ehci_irq,
+	.flags			= HCD_MEMORY | HCD_USB2,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.reset			= ehci_init,
+	.start			= ehci_run,
+	.stop			= ehci_stop,
+	.shutdown		= ehci_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue		= ehci_urb_enqueue,
+	.urb_dequeue		= ehci_urb_dequeue,
+	.endpoint_disable	= ehci_endpoint_disable,
+	.endpoint_reset		= ehci_endpoint_reset,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number	= ehci_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_status_data	= ehci_hub_status_data,
+	.hub_control		= ehci_hub_control,
+	.bus_suspend		= ehci_bus_suspend,
+	.bus_resume		= ehci_bus_resume,
+
+	.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+MODULE_ALIAS("platform:omap-ehci");
+MODULE_AUTHOR("Texas Instruments, Inc.");
+MODULE_AUTHOR("Felipe Balbi <felipe.balbi@nokia.com>");
+
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 139a2cc..a427d3b 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -616,9 +616,11 @@
 ) {
 	struct ehci_qtd		*qtd, *qtd_prev;
 	dma_addr_t		buf;
-	int			len, maxpacket;
+	int			len, this_sg_len, maxpacket;
 	int			is_input;
 	u32			token;
+	int			i;
+	struct scatterlist	*sg;
 
 	/*
 	 * URBs map to sequences of QTDs:  one logical transaction
@@ -659,7 +661,20 @@
 	/*
 	 * data transfer stage:  buffer setup
 	 */
-	buf = urb->transfer_dma;
+	i = urb->num_sgs;
+	if (len > 0 && i > 0) {
+		sg = urb->sg->sg;
+		buf = sg_dma_address(sg);
+
+		/* urb->transfer_buffer_length may be smaller than the
+		 * size of the scatterlist (or vice versa)
+		 */
+		this_sg_len = min_t(int, sg_dma_len(sg), len);
+	} else {
+		sg = NULL;
+		buf = urb->transfer_dma;
+		this_sg_len = len;
+	}
 
 	if (is_input)
 		token |= (1 /* "in" */ << 8);
@@ -675,7 +690,9 @@
 	for (;;) {
 		int this_qtd_len;
 
-		this_qtd_len = qtd_fill(ehci, qtd, buf, len, token, maxpacket);
+		this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token,
+				maxpacket);
+		this_sg_len -= this_qtd_len;
 		len -= this_qtd_len;
 		buf += this_qtd_len;
 
@@ -691,8 +708,13 @@
 		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
 			token ^= QTD_TOGGLE;
 
-		if (likely (len <= 0))
-			break;
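+		/* The current sg entry is exhausted: stop if the whole
+		 * URB is done, otherwise advance to the next entry.
+		 */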
+		if (likely(this_sg_len <= 0)) {
+			if (--i <= 0 || len <= 0)
+				break;
+			sg = sg_next(sg);
+			buf = sg_dma_address(sg);
+			this_sg_len = min_t(int, sg_dma_len(sg), len);
+		}
 
 		qtd_prev = qtd;
 		qtd = ehci_qtd_alloc (ehci, flags);
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index a5535b5..1e391e6 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1385,7 +1385,7 @@
  * given EHCI_TUNE_FLS and the slop).  Or, write a smarter scheduler!
  */
 
-#define SCHEDULE_SLOP	10	/* frames */
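+/* 80 microframes == the old slop of 10 frames (8 microframes per frame) */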
+#define SCHEDULE_SLOP	80	/* microframes */
 
 static int
 iso_stream_schedule (
@@ -1394,12 +1394,13 @@
 	struct ehci_iso_stream	*stream
 )
 {
-	u32			now, start, max, period;
+	u32			now, next, start, period;
 	int			status;
 	unsigned		mod = ehci->periodic_size << 3;
 	struct ehci_iso_sched	*sched = urb->hcpriv;
+	struct pci_dev		*pdev;
 
-	if (sched->span > (mod - 8 * SCHEDULE_SLOP)) {
+	if (sched->span > (mod - SCHEDULE_SLOP)) {
 		ehci_dbg (ehci, "iso request %p too long\n", urb);
 		status = -EFBIG;
 		goto fail;
@@ -1418,26 +1419,35 @@
 
 	now = ehci_readl(ehci, &ehci->regs->frame_index) % mod;
 
-	/* when's the last uframe this urb could start? */
-	max = now + mod;
-
 	/* Typical case: reuse current schedule, stream is still active.
 	 * Hopefully there are no gaps from the host falling behind
 	 * (irq delays etc), but if there are we'll take the next
 	 * slot in the schedule, implicitly assuming URB_ISO_ASAP.
 	 */
 	if (likely (!list_empty (&stream->td_list))) {
+		pdev = to_pci_dev(ehci_to_hcd(ehci)->self.controller);
 		start = stream->next_uframe;
-		if (start < now)
-			start += mod;
+
+		/* For high speed devices, allow scheduling within the
+		 * isochronous scheduling threshold.  For full speed devices,
+		 * don't. (Work around for Intel ICH9 bug.)
+		 */
+		if (!stream->highspeed &&
+				pdev->vendor == PCI_VENDOR_ID_INTEL)
+			next = now + ehci->i_thresh;
+		else
+			next = now;
 
 		/* Fell behind (by up to twice the slop amount)? */
-		if (start >= max - 2 * 8 * SCHEDULE_SLOP)
+		if (((start - next) & (mod - 1)) >=
+				mod - 2 * SCHEDULE_SLOP)
 			start += period * DIV_ROUND_UP(
-					max - start, period) - mod;
+					(next - start) & (mod - 1),
+					period);
 
 		/* Tried to schedule too far into the future? */
-		if (unlikely((start + sched->span) >= max)) {
+		if (unlikely(((start - now) & (mod - 1)) + sched->span
+					>= mod - 2 * SCHEDULE_SLOP)) {
 			status = -EFBIG;
 			goto fail;
 		}
@@ -1451,7 +1461,7 @@
 	 * can also help high bandwidth if the dma and irq loads don't
 	 * jump until after the queue is primed.
 	 */
-	start = SCHEDULE_SLOP * 8 + (now & ~0x07);
+	start = SCHEDULE_SLOP + (now & ~0x07);
 	start %= mod;
 	stream->next_uframe = start;
 
@@ -1482,7 +1492,7 @@
 	/* no room in the schedule */
 	ehci_dbg (ehci, "iso %ssched full %p (now %d max %d)\n",
 		list_empty (&stream->td_list) ? "" : "re",
-		urb, now, max);
+		urb, now, now + mod);
 	status = -ENOSPC;
 
 fail:
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
new file mode 100644
index 0000000..a586153
--- /dev/null
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -0,0 +1,300 @@
+/*
+ * EHCI HCD (Host Controller Driver) for USB.
+ *
+ * Bus Glue for Xilinx EHCI core on the of_platform bus
+ *
+ * Copyright (c) 2009 Xilinx, Inc.
+ *
+ * Based on "ehci-ppc-of.c" by Valentine Barshak <vbarshak@ru.mvista.com>
+ * and "ehci-ppc-soc.c" by Stefan Roese <sr@denx.de>
+ * and "ohci-ppc-of.c" by Sylvain Munaut <tnt@246tNt.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/signal.h>
+
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+/**
+ * ehci_xilinx_of_setup - Initialize the device for ehci_reset()
+ * @hcd:	Pointer to the usb_hcd device to which the host controller is bound
+ *
+ * called during probe() after chip reset completes.
+ */
+static int ehci_xilinx_of_setup(struct usb_hcd *hcd)
+{
+	struct ehci_hcd	*ehci = hcd_to_ehci(hcd);
+	int		retval;
+
+	retval = ehci_halt(ehci);
+	if (retval)
+		return retval;
+
+	retval = ehci_init(hcd);
+	if (retval)
+		return retval;
+
+	ehci->sbrn = 0x20;
+
+	return ehci_reset(ehci);
+}
+
+/**
+ * ehci_xilinx_port_handed_over - hand the port over if it could not be enabled
+ * @hcd:	Pointer to the usb_hcd device to which the host controller is bound
+ * @portnum:	Port number to which the device is attached.
+ *
+ * This function is used as a place to tell the user that the Xilinx USB host
+ * controller does not support LS devices. And in an HS-only configuration, it
+ * does not support FS devices either. It is hoped that this can help a
+ * confused user.
+ *
+ * There are cases when the host controller fails to enable the port due to,
+ * for example, insufficient power that can be supplied to the device from
+ * the USB bus. In those cases, the messages printed here are not helpful.
+ */
+static int ehci_xilinx_port_handed_over(struct usb_hcd *hcd, int portnum)
+{
+	dev_warn(hcd->self.controller, "port %d cannot be enabled\n", portnum);
+	if (hcd->has_tt) {
+		dev_warn(hcd->self.controller,
+			"Maybe you have connected a low speed device?\n");
+
+		dev_warn(hcd->self.controller,
+			"We do not support low speed devices\n");
+	} else {
+		dev_warn(hcd->self.controller,
+			"Maybe your device is not a high speed device?\n");
+		dev_warn(hcd->self.controller,
+			"The USB host controller does not support full speed "
+			"nor low speed devices\n");
+		dev_warn(hcd->self.controller,
+			"You can reconfigure the host controller to have "
+			"full speed support\n");
+	}
+
+	return 0;
+}
+
+
+static const struct hc_driver ehci_xilinx_of_hc_driver = {
+	.description		= hcd_name,
+	.product_desc		= "OF EHCI",
+	.hcd_priv_size		= sizeof(struct ehci_hcd),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq			= ehci_irq,
+	.flags			= HCD_MEMORY | HCD_USB2,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.reset			= ehci_xilinx_of_setup,
+	.start			= ehci_run,
+	.stop			= ehci_stop,
+	.shutdown		= ehci_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue		= ehci_urb_enqueue,
+	.urb_dequeue		= ehci_urb_dequeue,
+	.endpoint_disable	= ehci_endpoint_disable,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number	= ehci_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_status_data	= ehci_hub_status_data,
+	.hub_control		= ehci_hub_control,
+#ifdef	CONFIG_PM
+	.bus_suspend		= ehci_bus_suspend,
+	.bus_resume		= ehci_bus_resume,
+#endif
+	.relinquish_port	= NULL,
+	.port_handed_over	= ehci_xilinx_port_handed_over,
+
+	.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+/**
+ * ehci_hcd_xilinx_of_probe - Probe method for the USB host controller
+ * @op:		pointer to the of_device to which the host controller is bound
+ * @match:	pointer to of_device_id structure, not used
+ *
+ * This function requests resources and sets up appropriate properties for the
+ * host controller. Because the Xilinx USB host controller can be configured
+ * as HS only or HS/FS only, it checks the configuration in the device tree
+ * entry, and sets an appropriate value for hcd->has_tt.
+ */
+static int __devinit
+ehci_hcd_xilinx_of_probe(struct of_device *op, const struct of_device_id *match)
+{
+	struct device_node *dn = op->node;
+	struct usb_hcd *hcd;
+	struct ehci_hcd	*ehci;
+	struct resource res;
+	int irq;
+	int rv;
+	int *value;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	dev_dbg(&op->dev, "initializing XILINX-OF USB Controller\n");
+
+	rv = of_address_to_resource(dn, 0, &res);
+	if (rv)
+		return rv;
+
+	hcd = usb_create_hcd(&ehci_xilinx_of_hc_driver, &op->dev,
+				"XILINX-OF USB");
+	if (!hcd)
+		return -ENOMEM;
+
+	hcd->rsrc_start = res.start;
+	hcd->rsrc_len = res.end - res.start + 1;
+
+	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
+		printk(KERN_ERR __FILE__ ": request_mem_region failed\n");
+		rv = -EBUSY;
+		goto err_rmr;
+	}
+
+	irq = irq_of_parse_and_map(dn, 0);
+	if (irq == NO_IRQ) {
+		printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n");
+		rv = -EBUSY;
+		goto err_irq;
+	}
+
+	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+	if (!hcd->regs) {
+		printk(KERN_ERR __FILE__ ": ioremap failed\n");
+		rv = -ENOMEM;
+		goto err_ioremap;
+	}
+
+	ehci = hcd_to_ehci(hcd);
+
+	/* This core always has a big-endian register interface and uses
+	 * big-endian memory descriptors.
+	 */
+	ehci->big_endian_mmio = 1;
+	ehci->big_endian_desc = 1;
+
+	/* Check whether the FS support option is selected in the hardware.
+	 */
+	value = (int *)of_get_property(dn, "xlnx,support-usb-fs", NULL);
+	if (value && (*value == 1)) {
+		ehci_dbg(ehci, "USB host controller supports FS devices\n");
+		hcd->has_tt = 1;
+	} else {
+		ehci_dbg(ehci,
+			"USB host controller is HS only\n");
+		hcd->has_tt = 0;
+	}
+
+	/* The first 0x100 bytes are debug registers; the EHCI capability
+	 * registers follow at offset 0x100.
+	 */
+	ehci->caps = hcd->regs + 0x100;
+	ehci->regs = hcd->regs + 0x100 +
+			HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
+
+	/* cache this readonly data; minimize chip reads */
+	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+
+	rv = usb_add_hcd(hcd, irq, 0);
+	if (rv == 0)
+		return 0;
+
+	iounmap(hcd->regs);
+
+err_ioremap:
+err_irq:
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err_rmr:
+	usb_put_hcd(hcd);
+
+	return rv;
+}
+
+/**
+ * ehci_hcd_xilinx_of_remove - shutdown hcd and release resources
+ * @op:		pointer to of_device structure that is to be removed
+ *
+ * Remove the hcd structure, and release resources that have been requested
+ * during probe.
+ */
+static int ehci_hcd_xilinx_of_remove(struct of_device *op)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
+	dev_set_drvdata(&op->dev, NULL);
+
+	dev_dbg(&op->dev, "stopping XILINX-OF USB Controller\n");
+
+	usb_remove_hcd(hcd);
+
+	iounmap(hcd->regs);
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+
+	usb_put_hcd(hcd);
+
+	return 0;
+}
+
+/**
+ * ehci_hcd_xilinx_of_shutdown - shutdown the hcd
+ * @op:		pointer to the of_device structure that is being shut down
+ *
+ * Properly shut down the hcd by calling the driver's shutdown routine.
+ */
+static int ehci_hcd_xilinx_of_shutdown(struct of_device *op)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
+
+	if (hcd->driver->shutdown)
+		hcd->driver->shutdown(hcd);
+
+	return 0;
+}
+
+
+static struct of_device_id ehci_hcd_xilinx_of_match[] = {
+	{ .compatible = "xlnx,xps-usb-host-1.00.a", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ehci_hcd_xilinx_of_match);
+
+static struct of_platform_driver ehci_hcd_xilinx_of_driver = {
+	.name		= "xilinx-of-ehci",
+	.match_table	= ehci_hcd_xilinx_of_match,
+	.probe		= ehci_hcd_xilinx_of_probe,
+	.remove		= ehci_hcd_xilinx_of_remove,
+	.shutdown	= ehci_hcd_xilinx_of_shutdown,
+	.driver		= {
+		.name	= "xilinx-of-ehci",
+		.owner	= THIS_MODULE,
+	},
+};
diff --git a/drivers/usb/host/isp1362.h b/drivers/usb/host/isp1362.h
index 1a253eb..5151516 100644
--- a/drivers/usb/host/isp1362.h
+++ b/drivers/usb/host/isp1362.h
@@ -534,8 +534,8 @@
 
 	/* periodic schedule: isochronous */
 	struct list_head	isoc;
-	int			istl_flip:1;
-	int			irq_active:1;
+	unsigned int		istl_flip:1;
+	unsigned int		irq_active:1;
 
 	/* Schedules for the current frame */
 	struct isp1362_ep_queue atl_queue;
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 7ccffcb..68b83ab 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -35,7 +35,7 @@
 
 static void at91_start_clock(void)
 {
-	if (cpu_is_at91sam9261())
+	if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
 		clk_enable(hclk);
 	clk_enable(iclk);
 	clk_enable(fclk);
@@ -46,7 +46,7 @@
 {
 	clk_disable(fclk);
 	clk_disable(iclk);
-	if (cpu_is_at91sam9261())
+	if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
 		clk_disable(hclk);
 	clocked = 0;
 }
@@ -142,7 +142,7 @@
 
 	iclk = clk_get(&pdev->dev, "ohci_clk");
 	fclk = clk_get(&pdev->dev, "uhpck");
-	if (cpu_is_at91sam9261())
+	if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
 		hclk = clk_get(&pdev->dev, "hck0");
 
 	at91_start_hc(pdev);
@@ -155,7 +155,7 @@
 	/* Error handling */
 	at91_stop_hc(pdev);
 
-	if (cpu_is_at91sam9261())
+	if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
 		clk_put(hclk);
 	clk_put(fclk);
 	clk_put(iclk);
@@ -192,7 +192,7 @@
 	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
 	usb_put_hcd(hcd);
 
-	if (cpu_is_at91sam9261())
+	if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
 		clk_put(hclk);
 	clk_put(fclk);
 	clk_put(iclk);
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c
index 100bf3d..2769326 100644
--- a/drivers/usb/host/ohci-pnx4008.c
+++ b/drivers/usb/host/ohci-pnx4008.c
@@ -98,8 +98,8 @@
 #define ISP1301_I2C_INTERRUPT_RISING 0xE
 #define ISP1301_I2C_REG_CLEAR_ADDR 1
 
-struct i2c_driver isp1301_driver;
-struct i2c_client *isp1301_i2c_client;
+static struct i2c_driver isp1301_driver;
+static struct i2c_client *isp1301_i2c_client;
 
 extern int usb_disabled(void);
 extern int ocpi_enable(void);
@@ -120,12 +120,12 @@
 	return 0;
 }
 
-const struct i2c_device_id isp1301_id[] = {
+static const struct i2c_device_id isp1301_id[] = {
 	{ "isp1301_pnx", 0 },
 	{ }
 };
 
-struct i2c_driver isp1301_driver = {
+static struct i2c_driver isp1301_driver = {
 	.driver = {
 		.name = "isp1301_pnx",
 	},
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index e33d362..41dbc70 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -822,8 +822,6 @@
 		return;
 
 	list_for_each_entry_safe(td, next, list, queue) {
-		if (!td)
-			continue;
 		if (td->address != address)
 			continue;
 
@@ -2025,8 +2023,6 @@
 	struct list_head *list = &r8a66597->child_device;
 
 	list_for_each_entry(dev, list, device_list) {
-		if (!dev)
-			continue;
 		if (dev->usb_address != addr)
 			continue;
 
diff --git a/drivers/usb/host/whci/debug.c b/drivers/usb/host/whci/debug.c
index 2273c81..8c1c610 100644
--- a/drivers/usb/host/whci/debug.c
+++ b/drivers/usb/host/whci/debug.c
@@ -31,17 +31,29 @@
 
 void qset_print(struct seq_file *s, struct whc_qset *qset)
 {
+	static const char *qh_type[] = {
+		"ctrl", "isoc", "bulk", "intr",
+		"rsvd", "rsvd", "rsvd", "lpintr",
+	};
 	struct whc_std *std;
 	struct urb *urb = NULL;
 	int i;
 
-	seq_printf(s, "qset %08x\n", (u32)qset->qset_dma);
+	seq_printf(s, "qset %08x", (u32)qset->qset_dma);
+	if (&qset->list_node == qset->whc->async_list.prev) {
+		seq_printf(s, " (dummy)\n");
+	} else {
+		seq_printf(s, " ep%d%s-%s maxpkt: %d\n",
+			   qset->qh.info1 & 0x0f,
+			   (qset->qh.info1 >> 4) & 0x1 ? "in" : "out",
+			   qh_type[(qset->qh.info1 >> 5) & 0x7],
+			   (qset->qh.info1 >> 16) & 0xffff);
+	}
 	seq_printf(s, "  -> %08x\n", (u32)qset->qh.link);
 	seq_printf(s, "  info: %08x %08x %08x\n",
-		qset->qh.info1, qset->qh.info2,  qset->qh.info3);
-	seq_printf(s, "  sts: %04x errs: %d\n", qset->qh.status, qset->qh.err_count);
+		   qset->qh.info1, qset->qh.info2,  qset->qh.info3);
+	seq_printf(s, "  sts: %04x errs: %d curwin: %08x\n",
+		   qset->qh.status, qset->qh.err_count, qset->qh.cur_window);
 	seq_printf(s, "  TD: sts: %08x opts: %08x\n",
-		qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options);
+		   qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options);
 
 	for (i = 0; i < WHCI_QSET_TD_MAX; i++) {
 		seq_printf(s, "  %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n",
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c
index 687b622..e0d3401 100644
--- a/drivers/usb/host/whci/hcd.c
+++ b/drivers/usb/host/whci/hcd.c
@@ -250,6 +250,7 @@
 	}
 
 	usb_hcd->wireless = 1;
+	usb_hcd->self.sg_tablesize = 2048; /* somewhat arbitrary */
 
 	wusbhc = usb_hcd_to_wusbhc(usb_hcd);
 	whc = wusbhc_to_whc(wusbhc);
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index 1b9dc15..7d4204d 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -49,16 +49,19 @@
  *        state
  * @urb:  an urb for a transfer to this endpoint
  */
-static void qset_fill_qh(struct whc_qset *qset, struct urb *urb)
+static void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb)
 {
 	struct usb_device *usb_dev = urb->dev;
+	struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
 	struct usb_wireless_ep_comp_descriptor *epcd;
 	bool is_out;
+	uint8_t phy_rate;
 
 	is_out = usb_pipeout(urb->pipe);
 
-	epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
+	qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize);
 
+	epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
 	if (epcd) {
 		qset->max_seq = epcd->bMaxSequence;
 		qset->max_burst = epcd->bMaxBurst;
@@ -67,12 +70,28 @@
 		qset->max_burst = 1;
 	}
 
+	/*
+	 * Initial PHY rate is 53.3 Mbit/s for control endpoints or
+	 * the maximum supported by the device for other endpoints
+	 * (unless limited by the user).
+	 */
+	if (usb_pipecontrol(urb->pipe))
+		phy_rate = UWB_PHY_RATE_53;
+	else {
+		uint16_t phy_rates;
+
+		phy_rates = le16_to_cpu(wusb_dev->wusb_cap_descr->wPHYRates);
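+		/* Bit n of wPHYRates advertises PHY rate index n, so the
+		 * highest set bit is the fastest rate the device supports.
+		 */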
+		phy_rate = fls(phy_rates) - 1;
+		if (phy_rate > whc->wusbhc.phy_rate)
+			phy_rate = whc->wusbhc.phy_rate;
+	}
+
 	qset->qh.info1 = cpu_to_le32(
 		QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
 		| (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
 		| usb_pipe_to_qh_type(urb->pipe)
 		| QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
-		| QH_INFO1_MAX_PKT_LEN(usb_maxpacket(urb->dev, urb->pipe, is_out))
+		| QH_INFO1_MAX_PKT_LEN(qset->max_packet)
 		);
 	qset->qh.info2 = cpu_to_le32(
 		QH_INFO2_BURST(qset->max_burst)
@@ -86,7 +105,7 @@
 	 * strength and can presumably guess the Tx power required
 	 * from that? */
 	qset->qh.info3 = cpu_to_le32(
-		QH_INFO3_TX_RATE_53_3
+		QH_INFO3_TX_RATE(phy_rate)
 		| QH_INFO3_TX_PWR(0) /* 0 == max power */
 		);
 
@@ -148,7 +167,7 @@
 
 		qset->ep = urb->ep;
 		urb->ep->hcpriv = qset;
-		qset_fill_qh(qset, urb);
+		qset_fill_qh(whc, qset, urb);
 	}
 	return qset;
 }
@@ -241,6 +260,36 @@
 	qset->ntds--;
 }
 
+static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std)
+{
+	struct scatterlist *sg;
+	void *bounce;
+	size_t remaining, offset;
+
+	bounce = std->bounce_buf;
+	remaining = std->len;
+
+	sg = std->bounce_sg;
+	offset = std->bounce_offset;
+
+	while (remaining) {
+		size_t len;
+
+		len = min(sg->length - offset, remaining);
+		memcpy(sg_virt(sg) + offset, bounce, len);
+
+		bounce += len;
+		remaining -= len;
+
+		offset += len;
+		if (offset >= sg->length) {
+			sg = sg_next(sg);
+			offset = 0;
+		}
+	}
+}
+
 /**
  * qset_free_std - remove an sTD and free it.
  * @whc: the WHCI host controller
@@ -249,13 +298,29 @@
 void qset_free_std(struct whc *whc, struct whc_std *std)
 {
 	list_del(&std->list_node);
-	if (std->num_pointers) {
-		dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
-				 std->num_pointers * sizeof(struct whc_page_list_entry),
-				 DMA_TO_DEVICE);
-		kfree(std->pl_virt);
-	}
+	if (std->bounce_buf) {
+		bool is_out = usb_pipeout(std->urb->pipe);
+		dma_addr_t dma_addr;
 
+		if (std->num_pointers)
+			dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr);
+		else
+			dma_addr = std->dma_addr;
+
+		dma_unmap_single(whc->wusbhc.dev, dma_addr,
+				 std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		if (!is_out)
+			qset_copy_bounce_to_sg(whc, std);
+		kfree(std->bounce_buf);
+	}
+	if (std->pl_virt) {
+		if (std->dma_addr)
+			dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
+					 std->num_pointers * sizeof(struct whc_page_list_entry),
+					 DMA_TO_DEVICE);
+		kfree(std->pl_virt);
+		std->pl_virt = NULL;
+	}
 	kfree(std);
 }
 
@@ -293,12 +358,17 @@
 {
 	dma_addr_t dma_addr = std->dma_addr;
 	dma_addr_t sp, ep;
-	size_t std_len = std->len;
 	size_t pl_len;
 	int p;
 
-	sp = ALIGN(dma_addr, WHCI_PAGE_SIZE);
-	ep = dma_addr + std_len;
+	/* Short buffers don't need a page list. */
+	if (std->len <= WHCI_PAGE_SIZE) {
+		std->num_pointers = 0;
+		return 0;
+	}
+
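+	/* sp is dma_addr rounded down to a WHCI page boundary; ep is
+	 * one past the end of the buffer.
+	 */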
+	sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
+	ep = dma_addr + std->len;
 	std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
 
 	pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
@@ -309,7 +379,7 @@
 
 	for (p = 0; p < std->num_pointers; p++) {
 		std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
-		dma_addr = ALIGN(dma_addr + WHCI_PAGE_SIZE, WHCI_PAGE_SIZE);
+		dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
 	}
 
 	return 0;
@@ -339,6 +409,218 @@
 	spin_unlock_irqrestore(&whc->lock, flags);
 }
 
+static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset,
+				    struct urb *urb, gfp_t mem_flags)
+{
+	struct whc_std *std;
+
+	std = kzalloc(sizeof(struct whc_std), mem_flags);
+	if (std == NULL)
+		return NULL;
+
+	std->urb = urb;
+	std->qtd = NULL;
+
+	INIT_LIST_HEAD(&std->list_node);
+	list_add_tail(&std->list_node, &qset->stds);
+
+	return std;
+}
+
+static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb,
+			   gfp_t mem_flags)
+{
+	size_t remaining;
+	struct scatterlist *sg;
+	int i;
+	int ntds = 0;
+	struct whc_std *std = NULL;
+	dma_addr_t prev_end = 0;
+	size_t pl_len;
+	int p = 0;
+
+	remaining = urb->transfer_buffer_length;
+
+	for_each_sg(urb->sg->sg, sg, urb->num_sgs, i) {
+		dma_addr_t dma_addr;
+		size_t dma_remaining;
+		dma_addr_t sp, ep;
+		int num_pointers;
+
+		if (remaining == 0)
+			break;
+
+		dma_addr = sg_dma_address(sg);
+		dma_remaining = min_t(size_t, sg_dma_len(sg), remaining);
+
+		while (dma_remaining) {
+			size_t dma_len;
+
+			/*
+			 * We can use the previous std (if it exists) provided that:
+			 * - the previous one ended on a page boundary.
+			 * - the current one begins on a page boundary.
+			 * - the previous one isn't full.
+			 *
+			 * If a new std is needed but the previous one
+			 * was not a whole number of packets then this
+			 * sg list cannot be mapped onto multiple
+			 * qTDs.  Return an error and let the caller
+			 * sort it out.
+			 */
+			if (!std
+			    || (prev_end & (WHCI_PAGE_SIZE-1))
+			    || (dma_addr & (WHCI_PAGE_SIZE-1))
+			    || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) {
+				if (std && std->len % qset->max_packet != 0)
+					return -EINVAL;
+				std = qset_new_std(whc, qset, urb, mem_flags);
+				if (std == NULL)
+					return -ENOMEM;
+				ntds++;
+				p = 0;
+			}
+
+			dma_len = dma_remaining;
+
+			/*
+			 * If the remainder of this element doesn't
+			 * fit in a single qTD, limit the qTD to a
+			 * whole number of packets.  This allows the
+			 * remainder to go into the next qTD.
+			 */
+			if (std->len + dma_len > QTD_MAX_XFER_SIZE) {
+				dma_len = (QTD_MAX_XFER_SIZE / qset->max_packet)
+					* qset->max_packet - std->len;
+			}
+
+			std->len += dma_len;
+			std->ntds_remaining = -1; /* filled in later */
+
+			sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
+			ep = dma_addr + dma_len;
+			num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
+			std->num_pointers += num_pointers;
+
+			pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
+
+			std->pl_virt = krealloc(std->pl_virt, pl_len, mem_flags);
+			if (std->pl_virt == NULL)
+				return -ENOMEM;
+
+			for (; p < std->num_pointers; p++) {
+				std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
+				dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
+			}
+
+			prev_end = dma_addr = ep;
+			dma_remaining -= dma_len;
+			remaining -= dma_len;
+		}
+	}
+
+	/* Now that the number of sTDs is known, go back and fill in
+	   std->ntds_remaining. */
+	list_for_each_entry(std, &qset->stds, list_node) {
+		if (std->ntds_remaining == -1) {
+			pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
+			std->ntds_remaining = ntds--;
+			std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt,
+						       pl_len, DMA_TO_DEVICE);
+		}
+	}
+	return 0;
+}
+
+/**
+ * qset_add_urb_sg_linearize - add an urb with sg list, copying the data
+ *
+ * If the URB contains an sg list whose elements cannot be directly
+ * mapped to qTDs then the data must be transferred via bounce
+ * buffers.
+ */
+static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
+				     struct urb *urb, gfp_t mem_flags)
+{
+	bool is_out = usb_pipeout(urb->pipe);
+	size_t max_std_len;
+	size_t remaining;
+	int ntds = 0;
+	struct whc_std *std = NULL;
+	void *bounce = NULL;
+	struct scatterlist *sg;
+	int i;
+
+	/* limit each sTD's bounce buffer to max_burst * max_packet
+	 * (at most 16 * 3.5 KiB = 56 KiB)
+	 */
+	max_std_len = qset->max_burst * qset->max_packet;
+
+	remaining = urb->transfer_buffer_length;
+
+	for_each_sg(urb->sg->sg, sg, urb->sg->nents, i) {
+		size_t len;
+		size_t sg_remaining;
+		void *orig;
+
+		if (remaining == 0)
+			break;
+
+		sg_remaining = min_t(size_t, remaining, sg->length);
+		orig = sg_virt(sg);
+
+		while (sg_remaining) {
+			if (!std || std->len == max_std_len) {
+				std = qset_new_std(whc, qset, urb, mem_flags);
+				if (std == NULL)
+					return -ENOMEM;
+				std->bounce_buf = kmalloc(max_std_len, mem_flags);
+				if (std->bounce_buf == NULL)
+					return -ENOMEM;
+				std->bounce_sg = sg;
+				std->bounce_offset = orig - sg_virt(sg);
+				bounce = std->bounce_buf;
+				ntds++;
+			}
+
+			len = min(sg_remaining, max_std_len - std->len);
+
+			if (is_out)
+				memcpy(bounce, orig, len);
+
+			std->len += len;
+			std->ntds_remaining = -1; /* filled in later */
+
+			bounce += len;
+			orig += len;
+			sg_remaining -= len;
+			remaining -= len;
+		}
+	}
+
+	/*
+	 * For each of the new sTDs, map the bounce buffers, create
+	 * page lists (if necessary), and fill in std->ntds_remaining.
+	 */
+	list_for_each_entry(std, &qset->stds, list_node) {
+		if (std->ntds_remaining != -1)
+			continue;
+
+		std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len,
+					       is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+		if (qset_fill_page_list(whc, std, mem_flags) < 0)
+			return -ENOMEM;
+
+		std->ntds_remaining = ntds--;
+	}
+
+	return 0;
+}
+
 /**
  * qset_add_urb - add an urb to the qset's queue.
  *
@@ -353,10 +635,7 @@
 	int remaining = urb->transfer_buffer_length;
 	u64 transfer_dma = urb->transfer_dma;
 	int ntds_remaining;
-
-	ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
-	if (ntds_remaining == 0)
-		ntds_remaining = 1;
+	int ret;
 
 	wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
 	if (wurb == NULL)
@@ -366,32 +645,39 @@
 	wurb->urb = urb;
 	INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);
 
+	if (urb->sg) {
+		ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
+		if (ret == -EINVAL) {
+			qset_free_stds(qset, urb);
+			ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags);
+		}
+		if (ret < 0)
+			goto err_no_mem;
+		return 0;
+	}
+
+	ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
+	if (ntds_remaining == 0)
+		ntds_remaining = 1;
+
 	while (ntds_remaining) {
 		struct whc_std *std;
 		size_t std_len;
 
-		std = kmalloc(sizeof(struct whc_std), mem_flags);
-		if (std == NULL)
-			goto err_no_mem;
-
 		std_len = remaining;
 		if (std_len > QTD_MAX_XFER_SIZE)
 			std_len = QTD_MAX_XFER_SIZE;
 
-		std->urb = urb;
+		std = qset_new_std(whc, qset, urb, mem_flags);
+		if (std == NULL)
+			goto err_no_mem;
+
 		std->dma_addr = transfer_dma;
 		std->len = std_len;
 		std->ntds_remaining = ntds_remaining;
-		std->qtd = NULL;
 
-		INIT_LIST_HEAD(&std->list_node);
-		list_add_tail(&std->list_node, &qset->stds);
-
-		if (std_len > WHCI_PAGE_SIZE) {
-			if (qset_fill_page_list(whc, std, mem_flags) < 0)
-				goto err_no_mem;
-		} else
-			std->num_pointers = 0;
+		if (qset_fill_page_list(whc, std, mem_flags) < 0)
+			goto err_no_mem;
 
 		ntds_remaining--;
 		remaining -= std_len;
diff --git a/drivers/usb/host/whci/whcd.h b/drivers/usb/host/whci/whcd.h
index 24e94d9..c80c7d9 100644
--- a/drivers/usb/host/whci/whcd.h
+++ b/drivers/usb/host/whci/whcd.h
@@ -84,6 +84,11 @@
  * @len: the length of data in the associated TD.
  * @ntds_remaining: number of TDs (starting from this one) in this transfer.
  *
+ * @bounce_buf: a bounce buffer if the std was from an urb with a sg
+ * list that could not be mapped to qTDs directly.
+ * @bounce_sg: the first scatterlist element bounce_buf is for.
+ * @bounce_offset: the offset into bounce_sg for the start of bounce_buf.
+ *
  * Queued URBs may require more TDs than are available in a qset so we
  * use a list of these "software TDs" (sTDs) to hold per-TD data.
  */
@@ -97,6 +102,10 @@
 	int num_pointers;
 	dma_addr_t dma_addr;
 	struct whc_page_list_entry *pl_virt;
+
+	void *bounce_buf;
+	struct scatterlist *bounce_sg;
+	unsigned bounce_offset;
 };
 
 /**
diff --git a/drivers/usb/host/whci/whci-hc.h b/drivers/usb/host/whci/whci-hc.h
index e8d0001..4d4cbc0 100644
--- a/drivers/usb/host/whci/whci-hc.h
+++ b/drivers/usb/host/whci/whci-hc.h
@@ -172,14 +172,7 @@
 #define QH_INFO3_MAX_DELAY(d)    ((d) << 0)  /* maximum stream delay in 125 us units (isoc only) */
 #define QH_INFO3_INTERVAL(i)     ((i) << 16) /* segment interval in 125 us units (isoc only) */
 
-#define QH_INFO3_TX_RATE_53_3    (0 << 24)
-#define QH_INFO3_TX_RATE_80      (1 << 24)
-#define QH_INFO3_TX_RATE_106_7   (2 << 24)
-#define QH_INFO3_TX_RATE_160     (3 << 24)
-#define QH_INFO3_TX_RATE_200     (4 << 24)
-#define QH_INFO3_TX_RATE_320     (5 << 24)
-#define QH_INFO3_TX_RATE_400     (6 << 24)
-#define QH_INFO3_TX_RATE_480     (7 << 24)
+#define QH_INFO3_TX_RATE(r)      ((r) << 24) /* PHY rate (see [ECMA-368] section 10.3.1.1) */
 #define QH_INFO3_TX_PWR(p)       ((p) << 29) /* transmit power (see [WUSB] section 5.2.1.2) */
 
 #define QH_STATUS_FLOW_CTRL      (1 << 15)
@@ -267,8 +260,9 @@
 	unsigned reset:1;
 	struct urb *pause_after_urb;
 	struct completion remove_complete;
-	int max_burst;
-	int max_seq;
+	uint16_t max_packet;
+	uint8_t max_burst;
+	uint8_t max_seq;
 };
 
 static inline void whc_qset_set_link_ptr(u64 *ptr, u64 target)
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 932f999..5e92c72 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -67,6 +67,25 @@
 }
 
 /*
+ * Disable interrupts and begin the xHCI halting process.
+ */
+void xhci_quiesce(struct xhci_hcd *xhci)
+{
+	u32 halted;
+	u32 cmd;
+	u32 mask;
+
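+	/* Mask off the interrupt-enable bits; additionally clear the
+	 * run/stop bit unless the controller has already halted.
+	 */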
+	mask = ~(XHCI_IRQS);
+	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
+	if (!halted)
+		mask &= ~CMD_RUN;
+
+	cmd = xhci_readl(xhci, &xhci->op_regs->command);
+	cmd &= mask;
+	xhci_writel(xhci, cmd, &xhci->op_regs->command);
+}
+
+/*
  * Force HC into halt state.
  *
  * Disable any IRQs and clear the run/stop bit.
@@ -77,20 +96,8 @@
  */
 int xhci_halt(struct xhci_hcd *xhci)
 {
-	u32 halted;
-	u32 cmd;
-	u32 mask;
-
 	xhci_dbg(xhci, "// Halt the HC\n");
-	/* Disable all interrupts from the host controller */
-	mask = ~(XHCI_IRQS);
-	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
-	if (!halted)
-		mask &= ~CMD_RUN;
-
-	cmd = xhci_readl(xhci, &xhci->op_regs->command);
-	cmd &= mask;
-	xhci_writel(xhci, cmd, &xhci->op_regs->command);
+	xhci_quiesce(xhci);
 
 	return handshake(xhci, &xhci->op_regs->status,
 			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
@@ -124,28 +131,6 @@
 	return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000);
 }
 
-/*
- * Stop the HC from processing the endpoint queues.
- */
-static void xhci_quiesce(struct xhci_hcd *xhci)
-{
-	/*
-	 * Queues are per endpoint, so we need to disable an endpoint or slot.
-	 *
-	 * To disable a slot, we need to insert a disable slot command on the
-	 * command ring and ring the doorbell.  This will also free any internal
-	 * resources associated with the slot (which might not be what we want).
-	 *
-	 * A Release Endpoint command sounds better - doesn't free internal HC
-	 * memory, but removes the endpoints from the schedule and releases the
-	 * bandwidth, disables the doorbells, and clears the endpoint enable
-	 * flag.  Usually used prior to a set interface command.
-	 *
-	 * TODO: Implement after command ring code is done.
-	 */
-	BUG_ON(!HC_IS_RUNNING(xhci_to_hcd(xhci)->state));
-	xhci_dbg(xhci, "Finished quiescing -- code not written yet\n");
-}
 
 #if 0
 /* Set up MSI-X table for entry 0 (may claim other entries later) */
@@ -261,8 +246,14 @@
 	/* Flush posted writes */
 	xhci_readl(xhci, &xhci->ir_set->irq_pending);
 
-	/* FIXME this should be a delayed service routine that clears the EHB */
-	xhci_handle_event(xhci);
+	if (xhci->xhc_state & XHCI_STATE_DYING)
+		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
+				"Shouldn't IRQs be disabled?\n");
+	else
+		/* FIXME this should be a delayed service routine
+		 * that clears the EHB.
+		 */
+		xhci_handle_event(xhci);
 
 	/* Clear the event handler busy flag (RW1C); the event ring should be empty. */
 	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
@@ -335,7 +326,7 @@
 	spin_lock_irqsave(&xhci->lock, flags);
 	temp = xhci_readl(xhci, &xhci->op_regs->status);
 	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
-	if (temp == 0xffffffff) {
+	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
 		xhci_dbg(xhci, "HW died, polling stopped.\n");
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		return;
@@ -490,8 +481,6 @@
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 
 	spin_lock_irq(&xhci->lock);
-	if (HC_IS_RUNNING(hcd->state))
-		xhci_quiesce(xhci);
 	xhci_halt(xhci);
 	xhci_reset(xhci);
 	spin_unlock_irq(&xhci->lock);
@@ -727,16 +716,22 @@
 		 * atomic context to this function, which may allocate memory.
 		 */
 		spin_lock_irqsave(&xhci->lock, flags);
+		if (xhci->xhc_state & XHCI_STATE_DYING)
+			goto dying;
 		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
 				slot_id, ep_index);
 		spin_unlock_irqrestore(&xhci->lock, flags);
 	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
 		spin_lock_irqsave(&xhci->lock, flags);
+		if (xhci->xhc_state & XHCI_STATE_DYING)
+			goto dying;
 		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
 				slot_id, ep_index);
 		spin_unlock_irqrestore(&xhci->lock, flags);
 	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
 		spin_lock_irqsave(&xhci->lock, flags);
+		if (xhci->xhc_state & XHCI_STATE_DYING)
+			goto dying;
 		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
 				slot_id, ep_index);
 		spin_unlock_irqrestore(&xhci->lock, flags);
@@ -745,6 +740,12 @@
 	}
 exit:
 	return ret;
+dying:
+	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
+			"non-responsive xHCI host.\n",
+			urb->ep->desc.bEndpointAddress, urb);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return -ESHUTDOWN;
 }
 
 /*
@@ -806,6 +807,17 @@
 		kfree(td);
 		return ret;
 	}
+	if (xhci->xhc_state & XHCI_STATE_DYING) {
+		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
+				"non-responsive xHCI host.\n",
+				urb->ep->desc.bEndpointAddress, urb);
+		/* Let the stop endpoint command watchdog timer (which set this
+		 * state) finish cleaning up the endpoint TD lists.  We must
+		 * have caught it in the middle of dropping a lock and giving
+		 * back an URB.
+		 */
+		goto done;
+	}
 
 	xhci_dbg(xhci, "Cancel URB %p\n", urb);
 	xhci_dbg(xhci, "Event ring:\n");
@@ -817,12 +829,16 @@
 	xhci_debug_ring(xhci, ep_ring);
 	td = (struct xhci_td *) urb->hcpriv;
 
-	ep->cancels_pending++;
 	list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
 	/* Queue a stop endpoint command, but only if this is
 	 * the first cancellation to be handled.
 	 */
-	if (ep->cancels_pending == 1) {
+	if (!(ep->ep_state & EP_HALT_PENDING)) {
+		ep->ep_state |= EP_HALT_PENDING;
+		ep->stop_cmds_pending++;
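+		/* XHCI_STOP_EP_CMD_TIMEOUT is in seconds; HZ scales it
+		 * to jiffies.
+		 */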
+		ep->stop_cmd_timer.expires = jiffies +
+			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
+		add_timer(&ep->stop_cmd_timer);
 		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
 		xhci_ring_cmd_db(xhci);
 	}
@@ -1246,13 +1262,35 @@
 			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
 
 	xhci_zero_in_ctx(xhci, virt_dev);
-	/* Free any old rings */
+	/* Install new rings and free or cache any old rings */
 	for (i = 1; i < 31; ++i) {
-		if (virt_dev->eps[i].new_ring) {
-			xhci_ring_free(xhci, virt_dev->eps[i].ring);
-			virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
-			virt_dev->eps[i].new_ring = NULL;
+		int rings_cached;
+
+		if (!virt_dev->eps[i].new_ring)
+			continue;
+		/* Only cache or free the old ring if it exists.
+		 * It may not if this is the first add of an endpoint.
+		 */
+		if (virt_dev->eps[i].ring) {
+			rings_cached = virt_dev->num_rings_cached;
+			if (rings_cached < XHCI_MAX_RINGS_CACHED) {
+				virt_dev->ring_cache[rings_cached] =
+					virt_dev->eps[i].ring;
+				virt_dev->num_rings_cached++;
+				rings_cached = virt_dev->num_rings_cached;
+				xhci_dbg(xhci, "Cached old ring, "
+						"%d ring%s cached\n",
+						rings_cached,
+						(rings_cached > 1) ? "s" : "");
+			} else {
+				xhci_ring_free(xhci, virt_dev->eps[i].ring);
+				xhci_dbg(xhci, "Ring cache full (%d rings), "
+						"freeing ring\n",
+						virt_dev->num_rings_cached);
+			}
 		}
+		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
+		virt_dev->eps[i].new_ring = NULL;
 	}
 
 	return ret;
@@ -1427,16 +1465,27 @@
 void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
 {
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	struct xhci_virt_device *virt_dev;
 	unsigned long flags;
 	u32 state;
+	int i;
 
 	if (udev->slot_id == 0)
 		return;
+	virt_dev = xhci->devs[udev->slot_id];
+	if (!virt_dev)
+		return;
+
+	/* Stop any wayward timer functions (which may grab the lock) */
+	for (i = 0; i < 31; ++i) {
+		virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
+		del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
+	}
 
 	spin_lock_irqsave(&xhci->lock, flags);
 	/* Don't disable the slot if the host controller is dead. */
 	state = xhci_readl(xhci, &xhci->op_regs->status);
-	if (state == 0xffffffff) {
+	if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
 		xhci_free_virt_device(xhci, udev->slot_id);
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		return;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index b8fd270..bffcef7 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -125,6 +125,23 @@
 	kfree(ring);
 }
 
+static void xhci_initialize_ring_info(struct xhci_ring *ring)
+{
+	/* The ring is empty, so the enqueue pointer == dequeue pointer */
+	ring->enqueue = ring->first_seg->trbs;
+	ring->enq_seg = ring->first_seg;
+	ring->dequeue = ring->enqueue;
+	ring->deq_seg = ring->first_seg;
+	/* The ring is initialized to 0. The producer must write 1 to the cycle
+	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
+	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
+	 */
+	ring->cycle_state = 1;
+	/* Not necessary for new rings, but needed for re-initialized rings */
+	ring->enq_updates = 0;
+	ring->deq_updates = 0;
+}
+
 /**
  * Create a new ring with zero or more segments.
  *
@@ -173,17 +190,7 @@
 				" segment %p (virtual), 0x%llx (DMA)\n",
 				prev, (unsigned long long)prev->dma);
 	}
-	/* The ring is empty, so the enqueue pointer == dequeue pointer */
-	ring->enqueue = ring->first_seg->trbs;
-	ring->enq_seg = ring->first_seg;
-	ring->dequeue = ring->enqueue;
-	ring->deq_seg = ring->first_seg;
-	/* The ring is initialized to 0. The producer must write 1 to the cycle
-	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
-	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
-	 */
-	ring->cycle_state = 1;
-
+	xhci_initialize_ring_info(ring);
 	return ring;
 
 fail:
@@ -191,6 +198,27 @@
 	return 0;
 }
 
+/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
+ * pointers to the beginning of the ring.
+ */
+static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
+		struct xhci_ring *ring)
+{
+	struct xhci_segment	*seg = ring->first_seg;
+	do {
+		memset(seg->trbs, 0,
+				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
+		/* All endpoint rings have link TRBs */
+		xhci_link_segments(xhci, seg, seg->next, 1);
+		seg = seg->next;
+	} while (seg != ring->first_seg);
+	xhci_initialize_ring_info(ring);
+	/* td list should be empty since all URBs have been cancelled,
+	 * but just in case...
+	 */
+	INIT_LIST_HEAD(&ring->td_list);
+}
+
 #define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
 
 struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
@@ -248,6 +276,15 @@
 		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
 }
 
+static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
+		struct xhci_virt_ep *ep)
+{
+	init_timer(&ep->stop_cmd_timer);
+	ep->stop_cmd_timer.data = (unsigned long) ep;
+	ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
+	ep->xhci = xhci;
+}
+
 /* All the xhci_tds in the ring's TD list should be freed at this point */
 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 {
@@ -267,6 +304,12 @@
 		if (dev->eps[i].ring)
 			xhci_ring_free(xhci, dev->eps[i].ring);
 
+	if (dev->ring_cache) {
+		for (i = 0; i < dev->num_rings_cached; i++)
+			xhci_ring_free(xhci, dev->ring_cache[i]);
+		kfree(dev->ring_cache);
+	}
+
 	if (dev->in_ctx)
 		xhci_free_container_ctx(xhci, dev->in_ctx);
 	if (dev->out_ctx)
@@ -309,15 +352,25 @@
 	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
 			(unsigned long long)dev->in_ctx->dma);
 
-	/* Initialize the cancellation list for each endpoint */
-	for (i = 0; i < 31; i++)
+	/* Initialize the cancellation list and watchdog timers for each ep */
+	for (i = 0; i < 31; i++) {
+		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
 		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
+	}
 
 	/* Allocate endpoint 0 ring */
 	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
 	if (!dev->eps[0].ring)
 		goto fail;
 
+	/* Allocate pointers to the ring cache */
+	dev->ring_cache = kzalloc(
+			sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
+			flags);
+	if (!dev->ring_cache)
+		goto fail;
+	dev->num_rings_cached = 0;
+
 	init_completion(&dev->cmd_completion);
 	INIT_LIST_HEAD(&dev->cmd_list);
 
@@ -544,8 +597,16 @@
 	/* Set up the endpoint ring */
 	virt_dev->eps[ep_index].new_ring =
 		xhci_ring_alloc(xhci, 1, true, mem_flags);
-	if (!virt_dev->eps[ep_index].new_ring)
-		return -ENOMEM;
+	if (!virt_dev->eps[ep_index].new_ring) {
+		/* Attempt to use the ring cache */
+		if (virt_dev->num_rings_cached == 0)
+			return -ENOMEM;
+		virt_dev->num_rings_cached--;
+		virt_dev->eps[ep_index].new_ring =
+			virt_dev->ring_cache[virt_dev->num_rings_cached];
+		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
+		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
+	}
 	ep_ring = virt_dev->eps[ep_index].new_ring;
 	ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
 
@@ -768,14 +829,17 @@
 
 	command->in_ctx =
 		xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags);
-	if (!command->in_ctx)
+	if (!command->in_ctx) {
+		kfree(command);
 		return NULL;
+	}
 
 	if (allocate_completion) {
 		command->completion =
 			kzalloc(sizeof(struct completion), mem_flags);
 		if (!command->completion) {
 			xhci_free_container_ctx(xhci, command->in_ctx);
+			kfree(command);
 			return NULL;
 		}
 		init_completion(command->completion);
@@ -848,6 +912,163 @@
 	xhci->page_shift = 0;
 }
 
+static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
+		struct xhci_segment *input_seg,
+		union xhci_trb *start_trb,
+		union xhci_trb *end_trb,
+		dma_addr_t input_dma,
+		struct xhci_segment *result_seg,
+		char *test_name, int test_number)
+{
+	unsigned long long start_dma;
+	unsigned long long end_dma;
+	struct xhci_segment *seg;
+
+	start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
+	end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);
+
+	seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
+	if (seg != result_seg) {
+		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
+				test_name, test_number);
+		xhci_warn(xhci, "Tested TRB math w/ seg %p and "
+				"input DMA 0x%llx\n",
+				input_seg,
+				(unsigned long long) input_dma);
+		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
+				"ending TRB %p (0x%llx DMA)\n",
+				start_trb, start_dma,
+				end_trb, end_dma);
+		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
+				result_seg, seg);
+		return -1;
+	}
+	return 0;
+}
+
+/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
+static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
+{
+	struct {
+		dma_addr_t		input_dma;
+		struct xhci_segment	*result_seg;
+	} simple_test_vector [] = {
+		/* A zeroed DMA field should fail */
+		{ 0, NULL },
+		/* One TRB before the ring start should fail */
+		{ xhci->event_ring->first_seg->dma - 16, NULL },
+		/* One byte before the ring start should fail */
+		{ xhci->event_ring->first_seg->dma - 1, NULL },
+		/* Starting TRB should succeed */
+		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
+		/* Ending TRB should succeed */
+		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
+			xhci->event_ring->first_seg },
+		/* One byte after the ring end should fail */
+		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
+		/* One TRB after the ring end should fail */
+		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
+		/* An address of all ones should fail */
+		{ (dma_addr_t) (~0), NULL },
+	};
+	struct {
+		struct xhci_segment	*input_seg;
+		union xhci_trb		*start_trb;
+		union xhci_trb		*end_trb;
+		dma_addr_t		input_dma;
+		struct xhci_segment	*result_seg;
+	} complex_test_vector [] = {
+		/* Test feeding a valid DMA address from a different ring */
+		{	.input_seg = xhci->event_ring->first_seg,
+			.start_trb = xhci->event_ring->first_seg->trbs,
+			.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
+			.input_dma = xhci->cmd_ring->first_seg->dma,
+			.result_seg = NULL,
+		},
+		/* Test feeding a valid end TRB from a different ring */
+		{	.input_seg = xhci->event_ring->first_seg,
+			.start_trb = xhci->event_ring->first_seg->trbs,
+			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
+			.input_dma = xhci->cmd_ring->first_seg->dma,
+			.result_seg = NULL,
+		},
+		/* Test feeding a valid start and end TRB from a different ring */
+		{	.input_seg = xhci->event_ring->first_seg,
+			.start_trb = xhci->cmd_ring->first_seg->trbs,
+			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
+			.input_dma = xhci->cmd_ring->first_seg->dma,
+			.result_seg = NULL,
+		},
+		/* TRB in this ring, but after this TD */
+		{	.input_seg = xhci->event_ring->first_seg,
+			.start_trb = &xhci->event_ring->first_seg->trbs[0],
+			.end_trb = &xhci->event_ring->first_seg->trbs[3],
+			.input_dma = xhci->event_ring->first_seg->dma + 4*16,
+			.result_seg = NULL,
+		},
+		/* TRB in this ring, but before this TD */
+		{	.input_seg = xhci->event_ring->first_seg,
+			.start_trb = &xhci->event_ring->first_seg->trbs[3],
+			.end_trb = &xhci->event_ring->first_seg->trbs[6],
+			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
+			.result_seg = NULL,
+		},
+		/* TRB in this ring, but after this wrapped TD */
+		{	.input_seg = xhci->event_ring->first_seg,
+			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
+			.end_trb = &xhci->event_ring->first_seg->trbs[1],
+			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
+			.result_seg = NULL,
+		},
+		/* TRB in this ring, but before this wrapped TD */
+		{	.input_seg = xhci->event_ring->first_seg,
+			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
+			.end_trb = &xhci->event_ring->first_seg->trbs[1],
+			.input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
+			.result_seg = NULL,
+		},
+		/* TRB not in this ring, and we have a wrapped TD */
+		{	.input_seg = xhci->event_ring->first_seg,
+			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
+			.end_trb = &xhci->event_ring->first_seg->trbs[1],
+			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
+			.result_seg = NULL,
+		},
+	};
+
+	unsigned int num_tests;
+	int i, ret;
+
+	num_tests = ARRAY_SIZE(simple_test_vector);
+	for (i = 0; i < num_tests; i++) {
+		ret = xhci_test_trb_in_td(xhci,
+				xhci->event_ring->first_seg,
+				xhci->event_ring->first_seg->trbs,
+				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
+				simple_test_vector[i].input_dma,
+				simple_test_vector[i].result_seg,
+				"Simple", i);
+		if (ret < 0)
+			return ret;
+	}
+
+	num_tests = ARRAY_SIZE(complex_test_vector);
+	for (i = 0; i < num_tests; i++) {
+		ret = xhci_test_trb_in_td(xhci,
+				complex_test_vector[i].input_seg,
+				complex_test_vector[i].start_trb,
+				complex_test_vector[i].end_trb,
+				complex_test_vector[i].input_dma,
+				complex_test_vector[i].result_seg,
+				"Complex", i);
+		if (ret < 0)
+			return ret;
+	}
+	xhci_dbg(xhci, "TRB math tests passed.\n");
+	return 0;
+}
+
+
 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 {
 	dma_addr_t	dma;
@@ -951,6 +1172,8 @@
 	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
 	if (!xhci->event_ring)
 		goto fail;
+	if (xhci_check_trb_in_td_math(xhci, flags) < 0)
+		goto fail;
 
 	xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
 			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 06595ec..e097008 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -54,6 +54,8 @@
 	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
 	int			retval;
 
+	hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 1;
+
 	xhci->cap_regs = hcd->regs;
 	xhci->op_regs = hcd->regs +
 		HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 821b7b4..ee7bc7e 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -306,7 +306,7 @@
 	/* Don't ring the doorbell for this endpoint if there are pending
 	 * cancellations because we don't want to interrupt processing.
 	 */
-	if (!ep->cancels_pending && !(ep_state & SET_DEQ_PENDING)
+	if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
 			&& !(ep_state & EP_HALTED)) {
 		field = xhci_readl(xhci, db_addr) & DB_MASK;
 		xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
@@ -475,6 +475,35 @@
 	ep->ep_state |= SET_DEQ_PENDING;
 }
 
+static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
+		struct xhci_virt_ep *ep)
+{
+	ep->ep_state &= ~EP_HALT_PENDING;
+	/* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
+	 * timer is running on another CPU, we don't decrement stop_cmds_pending
+	 * (since we didn't successfully stop the watchdog timer).
+	 */
+	if (del_timer(&ep->stop_cmd_timer))
+		ep->stop_cmds_pending--;
+}
+
+/* Must be called with xhci->lock held in interrupt context */
+static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
+		struct xhci_td *cur_td, int status, char *adjective)
+{
+	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+
+	cur_td->urb->hcpriv = NULL;
+	usb_hcd_unlink_urb_from_ep(hcd, cur_td->urb);
+	xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, cur_td->urb);
+
+	spin_unlock(&xhci->lock);
+	usb_hcd_giveback_urb(hcd, cur_td->urb, status);
+	kfree(cur_td);
+	spin_lock(&xhci->lock);
+	xhci_dbg(xhci, "%s URB given back\n", adjective);
+}
+
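
The helper above also encodes the standard HCD locking rule: the completion callback run by usb_hcd_giveback_urb() may resubmit the URB and re-enter the driver, so the lock must be dropped around the call. A reduced sketch of just that invariant (illustrative, not driver code):

static void giveback_dropping_lock(struct usb_hcd *hcd, spinlock_t *lock,
		struct urb *urb, int status)
	__releases(lock) __acquires(lock)
{
	usb_hcd_unlink_urb_from_ep(hcd, urb);	/* still under the lock */
	spin_unlock(lock);
	/* The completion handler may call usb_submit_urb() and re-enter the
	 * HCD; holding a non-recursive spinlock here would deadlock. */
	usb_hcd_giveback_urb(hcd, urb, status);
	spin_lock(lock);
}
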
 /*
  * When we get a command completion for a Stop Endpoint Command, we need to
  * unlink any cancelled TDs from the ring.  There are two ways to do that:
@@ -497,9 +526,6 @@
 	struct xhci_td *last_unlinked_td;
 
 	struct xhci_dequeue_state deq_state;
-#ifdef CONFIG_USB_HCD_STAT
-	ktime_t stop_time = ktime_get();
-#endif
 
 	memset(&deq_state, 0, sizeof(deq_state));
 	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
@@ -507,8 +533,11 @@
 	ep = &xhci->devs[slot_id]->eps[ep_index];
 	ep_ring = ep->ring;
 
-	if (list_empty(&ep->cancelled_td_list))
+	if (list_empty(&ep->cancelled_td_list)) {
+		xhci_stop_watchdog_timer_in_irq(xhci, ep);
+		ring_ep_doorbell(xhci, slot_id, ep_index);
 		return;
+	}
 
 	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
 	 * We have the xHCI lock, so nothing can modify this list until we drop
@@ -535,9 +564,9 @@
 		 * the cancelled TD list for URB completion later.
 		 */
 		list_del(&cur_td->td_list);
-		ep->cancels_pending--;
 	}
 	last_unlinked_td = cur_td;
+	xhci_stop_watchdog_timer_in_irq(xhci, ep);
 
 	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
 	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
@@ -561,27 +590,136 @@
 		list_del(&cur_td->cancelled_td_list);
 
 		/* Clean up the cancelled URB */
-#ifdef CONFIG_USB_HCD_STAT
-		hcd_stat_update(xhci->tp_stat, cur_td->urb->actual_length,
-				ktime_sub(stop_time, cur_td->start_time));
-#endif
-		cur_td->urb->hcpriv = NULL;
-		usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), cur_td->urb);
-
-		xhci_dbg(xhci, "Giveback cancelled URB %p\n", cur_td->urb);
-		spin_unlock(&xhci->lock);
 		/* Doesn't matter what we pass for status, since the core will
 		 * just overwrite it (because the URB has been unlinked).
 		 */
-		usb_hcd_giveback_urb(xhci_to_hcd(xhci), cur_td->urb, 0);
-		kfree(cur_td);
+		xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");
 
-		spin_lock(&xhci->lock);
+		/* Stop processing the cancelled list if the watchdog timer
+		 * handler has marked the host controller as dying.
+		 */
+		if (xhci->xhc_state & XHCI_STATE_DYING)
+			return;
 	} while (cur_td != last_unlinked_td);
 
 	/* Return to the event handler with xhci->lock re-acquired */
 }
 
+/* Watchdog timer function for when a stop endpoint command fails to complete.
+ * In this case, we assume the host controller is broken or dying or dead.  The
+ * host may still be completing some other events, so we have to be careful to
+ * let the event ring handler and the URB dequeueing/enqueueing functions know
+ * through xhci->xhc_state.
+ *
+ * The timer may also fire if the host takes a very long time to respond to the
+ * command, and the stop endpoint command completion handler cannot delete the
+ * timer before the timer function is called.  Another endpoint cancellation may
+ * sneak in before the timer function can grab the lock, and that may queue
+ * another stop endpoint command and add the timer back.  So we cannot use a
+ * simple flag to say whether there is a pending stop endpoint command for a
+ * particular endpoint.
+ *
+ * Instead we use a combination of that flag and a counter for the number of
+ * pending stop endpoint commands.  If the timer is the tail end of the last
+ * stop endpoint command, and the endpoint's command is still pending, we assume
+ * the host is dying.
+ */
+void xhci_stop_endpoint_command_watchdog(unsigned long arg)
+{
+	struct xhci_hcd *xhci;
+	struct xhci_virt_ep *ep;
+	struct xhci_virt_ep *temp_ep;
+	struct xhci_ring *ring;
+	struct xhci_td *cur_td;
+	int ret, i, j;
+
+	ep = (struct xhci_virt_ep *) arg;
+	xhci = ep->xhci;
+
+	spin_lock(&xhci->lock);
+
+	ep->stop_cmds_pending--;
+	if (xhci->xhc_state & XHCI_STATE_DYING) {
+		xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
+				"xHCI as DYING, exiting.\n");
+		spin_unlock(&xhci->lock);
+		return;
+	}
+	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
+		xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
+				"exiting.\n");
+		spin_unlock(&xhci->lock);
+		return;
+	}
+
+	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
+	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
+	/* Oops, HC is dead or dying or at least not responding to the stop
+	 * endpoint command.
+	 */
+	xhci->xhc_state |= XHCI_STATE_DYING;
+	/* Disable interrupts from the host controller and start halting it */
+	xhci_quiesce(xhci);
+	spin_unlock(&xhci->lock);
+
+	ret = xhci_halt(xhci);
+
+	spin_lock(&xhci->lock);
+	if (ret < 0) {
+		/* This is bad; the host is not responding to commands and it's
+		 * not allowing itself to be halted.  At least interrupts are
+		 * disabled, so we can set HC_STATE_HALT and notify the
+		 * USB core.  But if we call usb_hc_died(), it will attempt to
+		 * disconnect all device drivers under this host.  Those
+		 * disconnect() methods will wait for all URBs to be unlinked,
+		 * so we must complete them.
+		 */
+		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
+		xhci_warn(xhci, "Completing active URBs anyway.\n");
+		/* We could turn all TDs on the rings into no-ops.  This won't
+		 * help if the host has cached part of the ring, and is slow if
+		 * we want to preserve the cycle bit.  Skip it and hope the host
+		 * doesn't touch the memory.
+		 */
+	}
+	for (i = 0; i < MAX_HC_SLOTS; i++) {
+		if (!xhci->devs[i])
+			continue;
+		for (j = 0; j < 31; j++) {
+			temp_ep = &xhci->devs[i]->eps[j];
+			ring = temp_ep->ring;
+			if (!ring)
+				continue;
+			xhci_dbg(xhci, "Killing URBs for slot ID %u, "
+					"ep index %u\n", i, j);
+			while (!list_empty(&ring->td_list)) {
+				cur_td = list_first_entry(&ring->td_list,
+						struct xhci_td,
+						td_list);
+				list_del(&cur_td->td_list);
+				if (!list_empty(&cur_td->cancelled_td_list))
+					list_del(&cur_td->cancelled_td_list);
+				xhci_giveback_urb_in_irq(xhci, cur_td,
+						-ESHUTDOWN, "killed");
+			}
+			while (!list_empty(&temp_ep->cancelled_td_list)) {
+				cur_td = list_first_entry(
+						&temp_ep->cancelled_td_list,
+						struct xhci_td,
+						cancelled_td_list);
+				list_del(&cur_td->cancelled_td_list);
+				xhci_giveback_urb_in_irq(xhci, cur_td,
+						-ESHUTDOWN, "killed");
+			}
+		}
+	}
+	spin_unlock(&xhci->lock);
+	xhci_to_hcd(xhci)->state = HC_STATE_HALT;
+	xhci_dbg(xhci, "Calling usb_hc_died()\n");
+	usb_hc_died(xhci_to_hcd(xhci));
+	xhci_dbg(xhci, "xHCI host controller is dead.\n");
+}
+
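
The arming side of the flag-plus-counter scheme described in the comment above lives in the URB cancellation path, which is not part of this hunk. A sketch of what it presumably looks like, using the fields and timeout added to xhci.h further down (hypothetical helper name):

static void arm_stop_ep_watchdog(struct xhci_virt_ep *ep)
{
	ep->ep_state |= EP_HALT_PENDING;
	ep->stop_cmds_pending++;
	/* Re-arming is safe: the handler only declares the host dead when
	 * it is the tail end of the last pending command (counter reaches
	 * zero with the flag still set). */
	mod_timer(&ep->stop_cmd_timer,
			jiffies + XHCI_STOP_EP_CMD_TIMEOUT * HZ);
}
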
 /*
  * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
  * we need to clear the set deq pending flag in the endpoint ring state, so that
@@ -765,28 +903,32 @@
 				virt_dev->in_ctx);
 		/* Input ctx add_flags are the endpoint index plus one */
 		ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
-		ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
-		if (!ep_ring) {
-			/* This must have been an initial configure endpoint */
-			xhci->devs[slot_id]->cmd_status =
-				GET_COMP_CODE(event->status);
-			complete(&xhci->devs[slot_id]->cmd_completion);
-			break;
-		}
-		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
-		xhci_dbg(xhci, "Completed config ep cmd - last ep index = %d, "
-				"state = %d\n", ep_index, ep_state);
+		/* A usb_set_interface() call directly after clearing a halted
+		 * condition may race on this quirky hardware.
+		 * Not worth worrying about, since this is prototype hardware.
+		 */
 		if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
-				ep_state & EP_HALTED) {
+				ep_index != (unsigned int) -1 &&
+				ctrl_ctx->add_flags - SLOT_FLAG ==
+					ctrl_ctx->drop_flags) {
+			ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+			ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
+			if (!(ep_state & EP_HALTED))
+				goto bandwidth_change;
+			xhci_dbg(xhci, "Completed config ep cmd - "
+					"last ep index = %d, state = %d\n",
+					ep_index, ep_state);
 			/* Clear our internal halted state and restart ring */
 			xhci->devs[slot_id]->eps[ep_index].ep_state &=
 				~EP_HALTED;
 			ring_ep_doorbell(xhci, slot_id, ep_index);
-		} else {
-			xhci->devs[slot_id]->cmd_status =
-				GET_COMP_CODE(event->status);
-			complete(&xhci->devs[slot_id]->cmd_completion);
+			break;
 		}
+bandwidth_change:
+		xhci_dbg(xhci, "Completed config ep cmd\n");
+		xhci->devs[slot_id]->cmd_status =
+			GET_COMP_CODE(event->status);
+		complete(&xhci->devs[slot_id]->cmd_completion);
 		break;
 	case TRB_TYPE(TRB_EVAL_CONTEXT):
 		virt_dev = xhci->devs[slot_id];
@@ -849,8 +991,7 @@
  * TRB in this TD, this function returns that TRB's segment.  Otherwise it
  * returns 0.
  */
-static struct xhci_segment *trb_in_td(
-		struct xhci_segment *start_seg,
+struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
 		union xhci_trb	*start_trb,
 		union xhci_trb	*end_trb,
 		dma_addr_t	suspect_dma)
@@ -900,6 +1041,45 @@
 	return 0;
 }
 
+static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
+		unsigned int slot_id, unsigned int ep_index,
+		struct xhci_td *td, union xhci_trb *event_trb)
+{
+	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+	ep->ep_state |= EP_HALTED;
+	ep->stopped_td = td;
+	ep->stopped_trb = event_trb;
+	xhci_queue_reset_ep(xhci, slot_id, ep_index);
+	xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
+	xhci_ring_cmd_db(xhci);
+}
+
+/* Check if an error has halted the endpoint ring.  The class driver will
+ * clean up the halt for a non-default control endpoint if we indicate a stall.
+ * However, babble and other errors also halt the endpoint ring, and the class
+ * driver won't clear the halt in that case, so we need to issue a Set Transfer
+ * Ring Dequeue Pointer command manually.
+ */
+static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
+		struct xhci_ep_ctx *ep_ctx,
+		unsigned int trb_comp_code)
+{
+	/* TRB completion codes that may require a manual halt cleanup */
+	if (trb_comp_code == COMP_TX_ERR ||
+			trb_comp_code == COMP_BABBLE ||
+			trb_comp_code == COMP_SPLIT_ERR)
+		/* The 0.95 spec says a babbling control endpoint
+		 * is not halted. The 0.96 spec says it is.  Some HW
+		 * claims to be 0.95 compliant, but it halts the control
+		 * endpoint anyway.  Check if a babble halted the
+		 * endpoint.
+		 */
+		if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_HALTED)
+			return 1;
+
+	return 0;
+}
+
 /*
  * If this function returns an error condition, it means it got a Transfer
  * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
@@ -1002,6 +1182,7 @@
 		xhci_warn(xhci, "WARN: TRB error on endpoint\n");
 		status = -EILSEQ;
 		break;
+	case COMP_SPLIT_ERR:
 	case COMP_TX_ERR:
 		xhci_warn(xhci, "WARN: transfer error on endpoint\n");
 		status = -EPROTO;
@@ -1015,6 +1196,16 @@
 		status = -ENOSR;
 		break;
 	default:
+		if (trb_comp_code >= 224 && trb_comp_code <= 255) {
+			/* Vendor defined "informational" completion code,
+			 * treat as not-an-error.
+			 */
+			xhci_dbg(xhci, "Vendor defined info completion code %u\n",
+					trb_comp_code);
+			xhci_dbg(xhci, "Treating code as success.\n");
+			status = 0;
+			break;
+		}
 		xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
 		urb = NULL;
 		goto cleanup;
@@ -1043,15 +1234,14 @@
 			else
 				status = 0;
 			break;
-		case COMP_BABBLE:
-			/* The 0.96 spec says a babbling control endpoint
-			 * is not halted. The 0.96 spec says it is.  Some HW
-			 * claims to be 0.95 compliant, but it halts the control
-			 * endpoint anyway.  Check if a babble halted the
-			 * endpoint.
-			 */
-			if (ep_ctx->ep_info != EP_STATE_HALTED)
+
+		default:
+			if (!xhci_requires_manual_halt_cleanup(xhci,
+						ep_ctx, trb_comp_code))
 				break;
+			xhci_dbg(xhci, "TRB error code %u, "
+					"halted endpoint index = %u\n",
+					trb_comp_code, ep_index);
 			/* else fall through */
 		case COMP_STALL:
 			/* Did we transfer part of the data (middle) phase? */
@@ -1063,15 +1253,9 @@
 			else
 				td->urb->actual_length = 0;
 
-			ep->stopped_td = td;
-			ep->stopped_trb = event_trb;
-			xhci_queue_reset_ep(xhci, slot_id, ep_index);
-			xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
-			xhci_ring_cmd_db(xhci);
+			xhci_cleanup_halted_endpoint(xhci,
+					slot_id, ep_index, td, event_trb);
 			goto td_cleanup;
-		default:
-			/* Others already handled above */
-			break;
 		}
 		/*
 		 * Did we transfer any data, despite the errors that might have
@@ -1209,16 +1393,25 @@
 		ep->stopped_td = td;
 		ep->stopped_trb = event_trb;
 	} else {
-		if (trb_comp_code == COMP_STALL ||
-				trb_comp_code == COMP_BABBLE) {
+		if (trb_comp_code == COMP_STALL) {
 			/* The transfer is completed from the driver's
 			 * perspective, but we need to issue a set dequeue
 			 * command for this stalled endpoint to move the dequeue
 			 * pointer past the TD.  We can't do that here because
-			 * the halt condition must be cleared first.
+			 * the halt condition must be cleared first.  Let the
+			 * USB class driver clear the stall later.
 			 */
 			ep->stopped_td = td;
 			ep->stopped_trb = event_trb;
+		} else if (xhci_requires_manual_halt_cleanup(xhci,
+					ep_ctx, trb_comp_code)) {
+			/* Other types of errors halt the endpoint, but the
+			 * class driver doesn't call usb_reset_endpoint() unless
+			 * the error is -EPIPE.  Clear the halted status in the
+			 * xHCI hardware manually.
+			 */
+			xhci_cleanup_halted_endpoint(xhci,
+					slot_id, ep_index, td, event_trb);
 		} else {
 			/* Update ring dequeue pointer */
 			while (ep_ring->dequeue != td->last_trb)
@@ -1249,10 +1442,9 @@
 		}
 		list_del(&td->td_list);
 		/* Was this TD slated to be cancelled but completed anyway? */
-		if (!list_empty(&td->cancelled_td_list)) {
+		if (!list_empty(&td->cancelled_td_list))
 			list_del(&td->cancelled_td_list);
-			ep->cancels_pending--;
-		}
+
 		/* Leave the TD around for the reset endpoint function to use
 		 * (but only if it's not a control endpoint, since we already
 		 * queued the Set TR dequeue pointer command for stalled
@@ -1331,6 +1523,14 @@
 	default:
 		xhci->error_bitmask |= 1 << 3;
 	}
+	/* Any of the above functions may drop and re-acquire the lock, so check
+	 * to make sure a watchdog timer didn't mark the host as non-responsive.
+	 */
+	if (xhci->xhc_state & XHCI_STATE_DYING) {
+		xhci_dbg(xhci, "xHCI host dying, returning from "
+				"event handler.\n");
+		return;
+	}
 
 	if (update_ptrs) {
 		/* Update SW and HC event ring dequeue pointer */
@@ -1555,6 +1755,21 @@
 	return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
 }
 
+/*
+ * The TD size is the number of bytes remaining in the TD (including this TRB),
+ * right shifted by 10.
+ * It must fit in bits 21:17, so it can't be bigger than 31.
+ */
+static u32 xhci_td_remainder(unsigned int remainder)
+{
+	u32 max = (1 << (21 - 17 + 1)) - 1;
+
+	if ((remainder >> 10) >= max)
+		return max << 17;
+	else
+		return (remainder >> 10) << 17;
+}
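
A worked example of the encoding (plain C, host-side arithmetic only): for a 64 KiB transfer cut into 1 KiB TRBs, the field starts saturated at 31 and counts down once the remaining length drops below 31 KiB:

#include <stdio.h>

int main(void)
{
	unsigned int total = 64 * 1024, chunk = 1024, sent;

	for (sent = 0; sent < total; sent += chunk) {
		unsigned int rem = total - sent;	/* includes this TRB */
		unsigned int field = (rem >> 10) >= 31 ? 31 : rem >> 10;

		printf("TRB at byte %5u: TD size field %2u\n", sent, field);
	}
	return 0;
}
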
+
 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		struct urb *urb, int slot_id, unsigned int ep_index)
 {
@@ -1612,6 +1827,7 @@
 	do {
 		u32 field = 0;
 		u32 length_field = 0;
+		u32 remainder = 0;
 
 		/* Don't change the cycle bit of the first TRB until later */
 		if (first_trb)
@@ -1641,8 +1857,10 @@
 					(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
 					(unsigned int) addr + trb_buff_len);
 		}
+		remainder = xhci_td_remainder(urb->transfer_buffer_length -
+				running_total);
 		length_field = TRB_LEN(trb_buff_len) |
-			TD_REMAINDER(urb->transfer_buffer_length - running_total) |
+			remainder |
 			TRB_INTR_TARGET(0);
 		queue_trb(xhci, ep_ring, false,
 				lower_32_bits(addr),
@@ -1755,6 +1973,7 @@
 
 	/* Queue the first TRB, even if it's zero-length */
 	do {
+		u32 remainder = 0;
 		field = 0;
 
 		/* Don't change the cycle bit of the first TRB until later */
@@ -1773,8 +1992,10 @@
 			td->last_trb = ep_ring->enqueue;
 			field |= TRB_IOC;
 		}
+		remainder = xhci_td_remainder(urb->transfer_buffer_length -
+				running_total);
 		length_field = TRB_LEN(trb_buff_len) |
-			TD_REMAINDER(urb->transfer_buffer_length - running_total) |
+			remainder |
 			TRB_INTR_TARGET(0);
 		queue_trb(xhci, ep_ring, false,
 				lower_32_bits(addr),
@@ -1862,7 +2083,7 @@
 	/* If there's data, queue data TRBs */
 	field = 0;
 	length_field = TRB_LEN(urb->transfer_buffer_length) |
-		TD_REMAINDER(urb->transfer_buffer_length) |
+		xhci_td_remainder(urb->transfer_buffer_length) |
 		TRB_INTR_TARGET(0);
 	if (urb->transfer_buffer_length > 0) {
 		if (setup->bRequestType & USB_DIR_IN)
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 4b254b6..8778135 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -652,13 +652,17 @@
 	struct xhci_ring		*new_ring;
 	unsigned int			ep_state;
 #define SET_DEQ_PENDING		(1 << 0)
-#define EP_HALTED		(1 << 1)
+#define EP_HALTED		(1 << 1)	/* For stall handling */
+#define EP_HALT_PENDING		(1 << 2)	/* For URB cancellation */
 	/* ----  Related to URB cancellation ---- */
 	struct list_head	cancelled_td_list;
-	unsigned int		cancels_pending;
 	/* The TRB that was last reported in a stopped endpoint ring */
 	union xhci_trb		*stopped_trb;
 	struct xhci_td		*stopped_td;
+	/* Watchdog timer for stop endpoint command to cancel URBs */
+	struct timer_list	stop_cmd_timer;
+	int			stop_cmds_pending;
+	struct xhci_hcd		*xhci;
 };
 
 struct xhci_virt_device {
@@ -673,6 +677,10 @@
 	struct xhci_container_ctx       *out_ctx;
 	/* Used for addressing devices and configuration changes */
 	struct xhci_container_ctx       *in_ctx;
+	/* Rings saved to ensure old alt settings can be reinstated */
+	struct xhci_ring		**ring_cache;
+	int				num_rings_cached;
+#define	XHCI_MAX_RINGS_CACHED	31
 	struct xhci_virt_ep		eps[31];
 	struct completion		cmd_completion;
 	/* Status of the last command issued for this device */
@@ -824,9 +832,6 @@
 /* Normal TRB fields */
 /* transfer_len bitmasks - bits 0:16 */
 #define	TRB_LEN(p)		((p) & 0x1ffff)
-/* TD size - number of bytes remaining in the TD (including this TRB):
- * bits 17 - 21.  Shift the number of bytes by 10. */
-#define TD_REMAINDER(p)		((((p) >> 10) & 0x1f) << 17)
 /* Interrupter Target - which MSI-X vector to target the completion event at */
 #define TRB_INTR_TARGET(p)	(((p) & 0x3ff) << 22)
 #define GET_INTR_TARGET(p)	(((p) >> 22) & 0x3ff)
@@ -1022,6 +1027,8 @@
 #define	ERST_ENTRIES	1
 /* Poll every 60 seconds */
 #define	POLL_TIMEOUT	60
+/* Stop endpoint command timeout (secs) for URB cancellation watchdog timer */
+#define XHCI_STOP_EP_CMD_TIMEOUT	5
 /* XXX: Make these module parameters */
 
 
@@ -1083,6 +1090,21 @@
 	struct timer_list	event_ring_timer;
 	int			zombie;
 #endif
+	/* Host controller watchdog timer structures */
+	unsigned int		xhc_state;
+/* Host controller is dying - not responding to commands. "I'm not dead yet!"
+ *
+ * xHC interrupts have been disabled and a watchdog timer will halt the xHCI
+ * host (if it has not already done so) and complete all URBs with an
+ * -ESHUTDOWN code.  Any code
+ * that sees this status (other than the timer that set it) should stop touching
+ * hardware immediately.  Interrupt handlers should return immediately when
+ * they see this status (any time they drop and re-acquire xhci->lock).
+ * xhci_urb_dequeue() should call usb_hcd_check_unlink_urb() and return without
+ * putting the TD on the canceled list, etc.
+ *
+ * There are no reports of xHCI host controllers that display this issue.
+ */
+#define XHCI_STATE_DYING	(1 << 0)
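
The dequeue behavior this comment prescribes is not part of the hunk; a hedged sketch of the shape it implies (hypothetical function name, standard HCD unlink helpers):

static int sketch_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&xhci->lock, flags);
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		/* Return without putting the TD on the cancelled list; the
		 * watchdog handler gives back every URB itself. */
		goto done;
	}
	/* ... normal path: add the TD to the cancelled list and queue a
	 * stop endpoint command, arming the watchdog timer ... */
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
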
 	/* Statistics */
 	int			noops_submitted;
 	int			noops_handled;
@@ -1223,6 +1245,7 @@
 #endif
 
 /* xHCI host controller glue */
+void xhci_quiesce(struct xhci_hcd *xhci);
 int xhci_halt(struct xhci_hcd *xhci);
 int xhci_reset(struct xhci_hcd *xhci);
 int xhci_init(struct usb_hcd *hcd);
@@ -1246,6 +1269,9 @@
 
 /* xHCI ring, segment, TRB, and TD functions */
 dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
+struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
+		union xhci_trb *start_trb, union xhci_trb *end_trb,
+		dma_addr_t suspect_dma);
 void xhci_ring_cmd_db(struct xhci_hcd *xhci);
 void *xhci_setup_one_noop(struct xhci_hcd *xhci);
 void xhci_handle_event(struct xhci_hcd *xhci);
@@ -1278,6 +1304,7 @@
 void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
 		struct xhci_dequeue_state *deq_state);
+void xhci_stop_endpoint_command_watchdog(unsigned long arg);
 
 /* xHCI roothub code */
 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index a9f06d7..3dab0c0 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -213,8 +213,9 @@
 }
 
 static unsigned pattern = 0;
-module_param (pattern, uint, S_IRUGO);
-MODULE_PARM_DESC(pattern, "i/o pattern (0 == zeroes)");
+static unsigned mod_pattern;
+module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(pattern, "i/o pattern (0 == zeroes)");
 
 static inline void simple_fill_buf (struct urb *urb)
 {
@@ -1567,6 +1568,8 @@
 
 	// FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is.
 
+	pattern = mod_pattern;
+
 	if (code != USBTEST_REQUEST)
 		return -EOPNOTSUPP;
 
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index 10f3205..385ec05 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -16,6 +16,7 @@
 #include <linux/compat.h>
 #include <linux/mm.h>
 #include <linux/smp_lock.h>
+#include <linux/scatterlist.h>
 
 #include <asm/uaccess.h>
 
@@ -221,7 +222,7 @@
 /*
  * This is a "chunked memcpy". It does not manipulate any counters.
  */
-static void mon_copy_to_buff(const struct mon_reader_bin *this,
+static unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
     unsigned int off, const unsigned char *from, unsigned int length)
 {
 	unsigned int step_len;
@@ -246,6 +247,7 @@
 		from += step_len;
 		length -= step_len;
 	}
+	return off;
 }
 
 /*
@@ -394,14 +396,44 @@
 	return 0;
 }
 
-static char mon_bin_get_data(const struct mon_reader_bin *rp,
-    unsigned int offset, struct urb *urb, unsigned int length)
+static unsigned int mon_bin_get_data(const struct mon_reader_bin *rp,
+    unsigned int offset, struct urb *urb, unsigned int length,
+    char *flag)
 {
+	int i;
+	struct scatterlist *sg;
+	unsigned int this_len;
 
-	if (urb->transfer_buffer == NULL)
-		return 'Z';
-	mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
-	return 0;
+	*flag = 0;
+	if (urb->num_sgs == 0) {
+		if (urb->transfer_buffer == NULL) {
+			*flag = 'Z';
+			return length;
+		}
+		mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
+		length = 0;
+
+	} else {
+		/* If IOMMU coalescing occurred, we cannot trust sg_page */
+		if (urb->sg->nents != urb->num_sgs) {
+			*flag = 'D';
+			return length;
+		}
+
+		/* Copy up to the first non-addressable segment */
+		for_each_sg(urb->sg->sg, sg, urb->num_sgs, i) {
+			if (length == 0 || PageHighMem(sg_page(sg)))
+				break;
+			this_len = min_t(unsigned int, sg->length, length);
+			offset = mon_copy_to_buff(rp, offset, sg_virt(sg),
+					this_len);
+			length -= this_len;
+		}
+		if (i == 0)
+			*flag = 'D';
+	}
+
+	return length;
 }
 
 static void mon_bin_get_isodesc(const struct mon_reader_bin *rp,
@@ -536,8 +568,9 @@
 	}
 
 	if (length != 0) {
-		ep->flag_data = mon_bin_get_data(rp, offset, urb, length);
-		if (ep->flag_data != 0) {	/* Yes, it's 0x00, not '0' */
+		length = mon_bin_get_data(rp, offset, urb, length,
+				&ep->flag_data);
+		if (length > 0) {
 			delta = (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
 			ep->len_cap -= length;
 			delta -= (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 9f1a922..047568f 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -10,6 +10,7 @@
 #include <linux/time.h>
 #include <linux/mutex.h>
 #include <linux/debugfs.h>
+#include <linux/scatterlist.h>
 #include <asm/uaccess.h>
 
 #include "usb_mon.h"
@@ -137,6 +138,8 @@
 static inline char mon_text_get_data(struct mon_event_text *ep, struct urb *urb,
     int len, char ev_type, struct mon_bus *mbus)
 {
+	void *src;
+
 	if (len <= 0)
 		return 'L';
 	if (len >= DATA_MAX)
@@ -150,10 +153,24 @@
 			return '>';
 	}
 
-	if (urb->transfer_buffer == NULL)
-		return 'Z';	/* '0' would be not as pretty. */
+	if (urb->num_sgs == 0) {
+		src = urb->transfer_buffer;
+		if (src == NULL)
+			return 'Z';	/* '0' would not be as pretty. */
+	} else {
+		struct scatterlist *sg = urb->sg->sg;
 
-	memcpy(ep->data, urb->transfer_buffer, len);
+		/* If IOMMU coalescing occurred, we cannot trust sg_page */
+		if (urb->sg->nents != urb->num_sgs ||
+				PageHighMem(sg_page(sg)))
+			return 'D';
+
+		/* For the text interface we copy only the first sg buffer */
+		len = min_t(int, sg->length, len);
+		src = sg_virt(sg);
+	}
+
+	memcpy(ep->data, src, len);
 	return 0;
 }
 
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index b84abd8..d9db864 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -9,10 +9,9 @@
 # (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
 config USB_MUSB_HDRC
 	depends on (USB || USB_GADGET)
-	depends on (ARM || BLACKFIN)
-	select NOP_USB_XCEIV if ARCH_DAVINCI
+	depends on (ARM || (BF54x && !BF544) || (BF52x && !BF522 && !BF523))
+	select NOP_USB_XCEIV if (ARCH_DAVINCI || MACH_OMAP3EVM || BLACKFIN)
 	select TWL4030_USB if MACH_OMAP_3430SDP
-	select NOP_USB_XCEIV if MACH_OMAP3EVM
 	select USB_OTG_UTILS
 	tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
 	help
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index fcec87e..fe4934d 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -53,13 +53,11 @@
 void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
 {
 	void __iomem *fifo = hw_ep->fifo;
+
+#ifdef CONFIG_BF52x
 	u8 epnum = hw_ep->epnum;
 	u16 dma_reg = 0;
 
-	DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
-			'R', hw_ep->epnum, fifo, len, dst);
-
-#ifdef CONFIG_BF52x
 	invalidate_dcache_range((unsigned int)dst,
 		(unsigned int)(dst + len));
 
@@ -102,6 +100,9 @@
 			len & 0x01 ? (len >> 1) + 1 : len >> 1);
 #endif
 
+	DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+			'R', hw_ep->epnum, fifo, len, dst);
+
 	dump_fifo_data(dst, len);
 }
 
@@ -225,8 +226,9 @@
 	return 0;
 }
 
-void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
 {
+	return -EIO;
 }
 
 int __init musb_platform_init(struct musb *musb)
@@ -261,10 +263,6 @@
 		SSYNC();
 	}
 
-	/* TODO
-	 * Set SIC-IVG register
-	 */
-
 	/* Configure PLL oscillator register */
 	bfin_write_USB_PLLOSC_CTRL(0x30a8);
 	SSYNC();
diff --git a/drivers/usb/musb/blackfin.h b/drivers/usb/musb/blackfin.h
index a240c1e..10b7d75 100644
--- a/drivers/usb/musb/blackfin.h
+++ b/drivers/usb/musb/blackfin.h
@@ -14,6 +14,43 @@
  * Blackfin specific definitions
  */
 
+/* Anomalies notes:
+ *
+ *  05000450 - USB DMA Mode 1 Short Packet Data Corruption:
+ *             The MUSB driver is designed to transfer buffers of N * maxpacket size
+ *             in DMA mode 1 and leave the rest of the data to the next
+ *             transfer in DMA mode 0, so we never transmit a short packet in
+ *             DMA mode 1.
+ *
+ *  05000463 - This anomaly doesn't affect this driver since it
+ *             never uses L1 or L2 memory as data destination.
+ *
+ *  05000464 - This anomaly doesn't affect this driver since it
+ *             never uses L1 or L2 memory as data source.
+ *
+ *  05000465 - The anomaly can be seen when SCLK is over 100 MHz, and there is
+ *             no workaround for bulk endpoints.  Since the wMaxPacketSize
+ *             of a bulk endpoint is at most 512, while the FIFO size of
+ *             endpoints 5, 6, and 7 is 1024, double buffer mode is enabled
+ *             automatically when these endpoints are used for bulk OUT.
+ *
+ *  05000466 - This anomaly doesn't affect this driver since it never mixes
+ *             concurrent DMA and core accesses to the TX endpoint FIFOs.
+ *
+ *  05000467 - The workaround for this anomaly will introduce another
+ *             anomaly - 05000465.
+ */
+
+/* The Mentor USB DMA engine on BF52x (silicon v0.0 and v0.1) seems to be
+ * unstable in host mode.  This may be caused by Anomaly 05000380.  After
+ * the root cause is dug out, this check will be updated accordingly.
+ * Until then, either use silicon v0.2+ or disable DMA mode in MUSB.
+ */
+#if ANOMALY_05000380 && defined(CONFIG_BF52x) && \
+    defined(CONFIG_USB_MUSB_HDRC) && !defined(CONFIG_MUSB_PIO_ONLY)
+# error "Please use PIO mode in MUSB driver on bf52x chip v0.0 and v0.1"
+#endif
+
 #undef DUMP_FIFO_DATA
 #ifdef DUMP_FIFO_DATA
 static void dump_fifo_data(u8 *buf, u16 len)
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 547e0e3..49f2346 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1319,7 +1319,6 @@
 #endif
 	u8 reg;
 	char *type;
-	u16 hwvers, rev_major, rev_minor;
 	char aInfo[78], aRevision[32], aDate[12];
 	void __iomem	*mbase = musb->mregs;
 	int		status = 0;
@@ -1391,11 +1390,10 @@
 	}
 
 	/* log release info */
-	hwvers = musb_read_hwvers(mbase);
-	rev_major = (hwvers >> 10) & 0x1f;
-	rev_minor = hwvers & 0x3ff;
-	snprintf(aRevision, 32, "%d.%d%s", rev_major,
-		rev_minor, (hwvers & 0x8000) ? "RC" : "");
+	musb->hwvers = musb_read_hwvers(mbase);
+	snprintf(aRevision, 32, "%d.%d%s", MUSB_HWVERS_MAJOR(musb->hwvers),
+		MUSB_HWVERS_MINOR(musb->hwvers),
+		(musb->hwvers & MUSB_HWVERS_RC) ? "RC" : "");
 	printk(KERN_DEBUG "%s: %sHDRC RTL version %s %s\n",
 			musb_driver_name, type, aRevision, aDate);
 
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 6aa5f22..03d5090 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -322,6 +322,14 @@
 	struct clk		*clock;
 	irqreturn_t		(*isr)(int, void *);
 	struct work_struct	irq_work;
+#define MUSB_HWVERS_MAJOR(x)	(((x) >> 10) & 0x1f)
+#define MUSB_HWVERS_MINOR(x)	((x) & 0x3ff)
+#define MUSB_HWVERS_RC		0x8000
+#define MUSB_HWVERS_1300	0x52C
+#define MUSB_HWVERS_1400	0x590
+#define MUSB_HWVERS_1800	0x720
+#define MUSB_HWVERS_2000	0x800
+	u16			hwvers;
 
 /* this hub status bit is reserved by USB 2.0 and not seen by usbcore */
 #define MUSB_PORT_STAT_RESUME	(1 << 31)
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
index 0a2c4e3..916065b 100644
--- a/drivers/usb/musb/musb_dma.h
+++ b/drivers/usb/musb/musb_dma.h
@@ -80,6 +80,17 @@
 #define tusb_dma_omap()			0
 #endif
 
+/* Anomaly 05000456 - USB Receive Interrupt Is Not Generated in DMA Mode 1
+ *	Only allow DMA mode 1 to be used when the USB will actually generate the
+ *	interrupts we expect.
+ */
+#ifdef CONFIG_BLACKFIN
+# undef USE_MODE1
+# if !ANOMALY_05000456
+#  define USE_MODE1
+# endif
+#endif
+
 /*
  * DMA channel status ... updated by the dma controller driver whenever that
  * status changes, and protected by the overall controller spinlock.
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 74073f9..c49b9ba 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -429,112 +429,102 @@
 	DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
 
 	dma = is_dma_capable() ? musb_ep->dma : NULL;
-	do {
-		/* REVISIT for high bandwidth, MUSB_TXCSR_P_INCOMPTX
-		 * probably rates reporting as a host error
+
+	/*
+	 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
+	 * probably rates reporting as a host error.
+	 */
+	if (csr & MUSB_TXCSR_P_SENTSTALL) {
+		csr |=	MUSB_TXCSR_P_WZC_BITS;
+		csr &= ~MUSB_TXCSR_P_SENTSTALL;
+		musb_writew(epio, MUSB_TXCSR, csr);
+		return;
+	}
+
+	if (csr & MUSB_TXCSR_P_UNDERRUN) {
+		/* We NAKed, no big deal... little reason to care. */
+		csr |=	 MUSB_TXCSR_P_WZC_BITS;
+		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
+		musb_writew(epio, MUSB_TXCSR, csr);
+		DBG(20, "underrun on ep%d, req %p\n", epnum, request);
+	}
+
+	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+		/*
+		 * SHOULD NOT HAPPEN... has with CPPI though, after
+		 * changing SENDSTALL (and other cases); harmless?
 		 */
-		if (csr & MUSB_TXCSR_P_SENTSTALL) {
+		DBG(5, "%s dma still busy?\n", musb_ep->end_point.name);
+		return;
+	}
+
+	if (request) {
+		u8	is_dma = 0;
+
+		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
+			is_dma = 1;
 			csr |= MUSB_TXCSR_P_WZC_BITS;
-			csr &= ~MUSB_TXCSR_P_SENTSTALL;
+			csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
+				 MUSB_TXCSR_TXPKTRDY);
 			musb_writew(epio, MUSB_TXCSR, csr);
-			break;
+			/* Ensure the write buffer is empty. */
+			csr = musb_readw(epio, MUSB_TXCSR);
+			request->actual += musb_ep->dma->actual_len;
+			DBG(4, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
+				epnum, csr, musb_ep->dma->actual_len, request);
 		}
 
-		if (csr & MUSB_TXCSR_P_UNDERRUN) {
-			/* we NAKed, no big deal ... little reason to care */
-			csr |= MUSB_TXCSR_P_WZC_BITS;
-			csr &= ~(MUSB_TXCSR_P_UNDERRUN
-					| MUSB_TXCSR_TXPKTRDY);
-			musb_writew(epio, MUSB_TXCSR, csr);
-			DBG(20, "underrun on ep%d, req %p\n", epnum, request);
-		}
-
-		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
-			/* SHOULD NOT HAPPEN ... has with cppi though, after
-			 * changing SENDSTALL (and other cases); harmless?
+		if (is_dma || request->actual == request->length) {
+			/*
+			 * First, maybe a terminating short packet. Some DMA
+			 * engines might handle this by themselves.
 			 */
-			DBG(5, "%s dma still busy?\n", musb_ep->end_point.name);
-			break;
-		}
-
-		if (request) {
-			u8	is_dma = 0;
-
-			if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
-				is_dma = 1;
-				csr |= MUSB_TXCSR_P_WZC_BITS;
-				csr &= ~(MUSB_TXCSR_DMAENAB
-						| MUSB_TXCSR_P_UNDERRUN
-						| MUSB_TXCSR_TXPKTRDY);
-				musb_writew(epio, MUSB_TXCSR, csr);
-				/* ensure writebuffer is empty */
-				csr = musb_readw(epio, MUSB_TXCSR);
-				request->actual += musb_ep->dma->actual_len;
-				DBG(4, "TXCSR%d %04x, dma off, "
-						"len %zu, req %p\n",
-					epnum, csr,
-					musb_ep->dma->actual_len,
-					request);
-			}
-
-			if (is_dma || request->actual == request->length) {
-
-				/* First, maybe a terminating short packet.
-				 * Some DMA engines might handle this by
-				 * themselves.
-				 */
-				if ((request->zero
-						&& request->length
-						&& (request->length
-							% musb_ep->packet_sz)
-							== 0)
+			if ((request->zero && request->length
+				&& request->length % musb_ep->packet_sz == 0)
 #ifdef CONFIG_USB_INVENTRA_DMA
-					|| (is_dma &&
-						((!dma->desired_mode) ||
-						    (request->actual &
-						    (musb_ep->packet_sz - 1))))
+				|| (is_dma && (!dma->desired_mode ||
+					(request->actual &
+						(musb_ep->packet_sz - 1))))
 #endif
-				) {
-					/* on dma completion, fifo may not
-					 * be available yet ...
-					 */
-					if (csr & MUSB_TXCSR_TXPKTRDY)
-						break;
-
-					DBG(4, "sending zero pkt\n");
-					musb_writew(epio, MUSB_TXCSR,
-							MUSB_TXCSR_MODE
-							| MUSB_TXCSR_TXPKTRDY);
-					request->zero = 0;
-				}
-
-				/* ... or if not, then complete it */
-				musb_g_giveback(musb_ep, request, 0);
-
-				/* kickstart next transfer if appropriate;
-				 * the packet that just completed might not
-				 * be transmitted for hours or days.
-				 * REVISIT for double buffering...
-				 * FIXME revisit for stalls too...
+			) {
+				/*
+				 * On DMA completion, FIFO may not be
+				 * available yet...
 				 */
-				musb_ep_select(mbase, epnum);
-				csr = musb_readw(epio, MUSB_TXCSR);
-				if (csr & MUSB_TXCSR_FIFONOTEMPTY)
-					break;
-				request = musb_ep->desc
-						? next_request(musb_ep)
-						: NULL;
-				if (!request) {
-					DBG(4, "%s idle now\n",
-						musb_ep->end_point.name);
-					break;
-				}
+				if (csr & MUSB_TXCSR_TXPKTRDY)
+					return;
+
+				DBG(4, "sending zero pkt\n");
+				musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
+						| MUSB_TXCSR_TXPKTRDY);
+				request->zero = 0;
 			}
 
-			txstate(musb, to_musb_request(request));
+			/* ... or if not, then complete it. */
+			musb_g_giveback(musb_ep, request, 0);
+
+			/*
+			 * Kickstart next transfer if appropriate;
+			 * the packet that just completed might not
+			 * be transmitted for hours or days.
+			 * REVISIT for double buffering...
+			 * FIXME revisit for stalls too...
+			 */
+			musb_ep_select(mbase, epnum);
+			csr = musb_readw(epio, MUSB_TXCSR);
+			if (csr & MUSB_TXCSR_FIFONOTEMPTY)
+				return;
+
+			if (!musb_ep->desc) {
+				DBG(4, "%s idle now\n",
+					musb_ep->end_point.name);
+				return;
+			} else
+				request = next_request(musb_ep);
 		}
 
-	} while (0);
+		txstate(musb, to_musb_request(request));
+	}
 }
 
 /* ------------------------------------------------------------ */
@@ -966,6 +956,7 @@
 
 	musb_ep->desc = desc;
 	musb_ep->busy = 0;
+	musb_ep->wedged = 0;
 	status = 0;
 
 	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
@@ -1220,7 +1211,7 @@
  *
  * exported to ep0 code
  */
-int musb_gadget_set_halt(struct usb_ep *ep, int value)
+static int musb_gadget_set_halt(struct usb_ep *ep, int value)
 {
 	struct musb_ep		*musb_ep = to_musb_ep(ep);
 	u8			epnum = musb_ep->current_epnum;
@@ -1262,7 +1253,8 @@
 				goto done;
 			}
 		}
-	}
+	} else
+		musb_ep->wedged = 0;
 
 	/* set/clear the stall and toggle bits */
 	DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear");
@@ -1301,6 +1293,21 @@
 	return status;
 }
 
+/*
+ * Set the halt feature, ignoring subsequent clear-halt requests from the
+ * host until the gadget driver clears the condition itself.
+ */
+static int musb_gadget_set_wedge(struct usb_ep *ep)
+{
+	struct musb_ep		*musb_ep = to_musb_ep(ep);
+
+	if (!ep)
+		return -EINVAL;
+
+	musb_ep->wedged = 1;
+
+	return usb_ep_set_halt(ep);
+}
+
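
For context, a sketch of how a function driver would reach this op through the gadget API (hypothetical driver functions; usb_ep_set_wedge() and usb_ep_clear_halt() are the standard wrappers):

static void my_func_protocol_stall(struct usb_ep *ep)
{
	/* Wedge instead of a plain halt: the host's ClearFeature(HALT)
	 * will now be ignored by the ep0 code changed below. */
	usb_ep_set_wedge(ep);
}

static void my_func_recover(struct usb_ep *ep)
{
	/* Only the gadget side may un-wedge, via a normal clear-halt,
	 * which also resets musb_ep->wedged in musb_gadget_set_halt(). */
	usb_ep_clear_halt(ep);
}
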
 static int musb_gadget_fifo_status(struct usb_ep *ep)
 {
 	struct musb_ep		*musb_ep = to_musb_ep(ep);
@@ -1371,6 +1378,7 @@
 	.queue		= musb_gadget_queue,
 	.dequeue	= musb_gadget_dequeue,
 	.set_halt	= musb_gadget_set_halt,
+	.set_wedge	= musb_gadget_set_wedge,
 	.fifo_status	= musb_gadget_fifo_status,
 	.fifo_flush	= musb_gadget_fifo_flush
 };
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
index 59502da..c8b1403 100644
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -75,6 +75,8 @@
 	/* later things are modified based on usage */
 	struct list_head		req_list;
 
+	u8				wedged;
+
 	/* true if lock must be dropped but req_list may not be advanced */
 	u8				busy;
 };
@@ -103,6 +105,4 @@
 
 extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
 
-extern int musb_gadget_set_halt(struct usb_ep *ep, int value);
-
 #endif		/* __MUSB_GADGET_H */
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 522efb3..8fba3f1 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -199,7 +199,6 @@
 static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req)
 {
 	musb_g_giveback(&musb->endpoints[0].ep_in, req, 0);
-	musb->ep0_state = MUSB_EP0_STAGE_SETUP;
 }
 
 /*
@@ -258,30 +257,53 @@
 			case USB_RECIP_INTERFACE:
 				break;
 			case USB_RECIP_ENDPOINT:{
-				const u8 num = ctrlrequest->wIndex & 0x0f;
-				struct musb_ep *musb_ep;
+				const u8		epnum =
+					ctrlrequest->wIndex & 0x0f;
+				struct musb_ep		*musb_ep;
+				struct musb_hw_ep	*ep;
+				void __iomem		*regs;
+				int			is_in;
+				u16			csr;
 
-				if (num == 0
-						|| num >= MUSB_C_NUM_EPS
-						|| ctrlrequest->wValue
-							!= USB_ENDPOINT_HALT)
+				if (epnum == 0 || epnum >= MUSB_C_NUM_EPS ||
+				    ctrlrequest->wValue != USB_ENDPOINT_HALT)
 					break;
 
-				if (ctrlrequest->wIndex & USB_DIR_IN)
-					musb_ep = &musb->endpoints[num].ep_in;
+				ep = musb->endpoints + epnum;
+				regs = ep->regs;
+				is_in = ctrlrequest->wIndex & USB_DIR_IN;
+				if (is_in)
+					musb_ep = &ep->ep_in;
 				else
-					musb_ep = &musb->endpoints[num].ep_out;
+					musb_ep = &ep->ep_out;
 				if (!musb_ep->desc)
 					break;
 
-				/* REVISIT do it directly, no locking games */
-				spin_unlock(&musb->lock);
-				musb_gadget_set_halt(&musb_ep->end_point, 0);
-				spin_lock(&musb->lock);
+				handled = 1;
+				/* Ignore request if endpoint is wedged */
+				if (musb_ep->wedged)
+					break;
+
+				musb_ep_select(mbase, epnum);
+				if (is_in) {
+					csr  = musb_readw(regs, MUSB_TXCSR);
+					csr |= MUSB_TXCSR_CLRDATATOG |
+					       MUSB_TXCSR_P_WZC_BITS;
+					csr &= ~(MUSB_TXCSR_P_SENDSTALL |
+						 MUSB_TXCSR_P_SENTSTALL |
+						 MUSB_TXCSR_TXPKTRDY);
+					musb_writew(regs, MUSB_TXCSR, csr);
+				} else {
+					csr  = musb_readw(regs, MUSB_RXCSR);
+					csr |= MUSB_RXCSR_CLRDATATOG |
+					       MUSB_RXCSR_P_WZC_BITS;
+					csr &= ~(MUSB_RXCSR_P_SENDSTALL |
+						 MUSB_RXCSR_P_SENTSTALL);
+					musb_writew(regs, MUSB_RXCSR, csr);
+				}
 
 				/* select ep0 again */
 				musb_ep_select(mbase, 0);
-				handled = 1;
 				} break;
 			default:
 				/* class, vendor, etc ... delegate */
@@ -374,10 +396,8 @@
 				int			is_in;
 				u16			csr;
 
-				if (epnum == 0
-						|| epnum >= MUSB_C_NUM_EPS
-						|| ctrlrequest->wValue
-							!= USB_ENDPOINT_HALT)
+				if (epnum == 0 || epnum >= MUSB_C_NUM_EPS ||
+				    ctrlrequest->wValue != USB_ENDPOINT_HALT)
 					break;
 
 				ep = musb->endpoints + epnum;
@@ -392,24 +412,20 @@
 
 				musb_ep_select(mbase, epnum);
 				if (is_in) {
-					csr = musb_readw(regs,
-							MUSB_TXCSR);
+					csr = musb_readw(regs, MUSB_TXCSR);
 					if (csr & MUSB_TXCSR_FIFONOTEMPTY)
 						csr |= MUSB_TXCSR_FLUSHFIFO;
 					csr |= MUSB_TXCSR_P_SENDSTALL
 						| MUSB_TXCSR_CLRDATATOG
 						| MUSB_TXCSR_P_WZC_BITS;
-					musb_writew(regs, MUSB_TXCSR,
-							csr);
+					musb_writew(regs, MUSB_TXCSR, csr);
 				} else {
-					csr = musb_readw(regs,
-							MUSB_RXCSR);
+					csr = musb_readw(regs, MUSB_RXCSR);
 					csr |= MUSB_RXCSR_P_SENDSTALL
 						| MUSB_RXCSR_FLUSHFIFO
 						| MUSB_RXCSR_CLRDATATOG
 						| MUSB_RXCSR_P_WZC_BITS;
-					musb_writew(regs, MUSB_RXCSR,
-							csr);
+					musb_writew(regs, MUSB_RXCSR, csr);
 				}
 
 				/* select ep0 again */
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index e3ab40a..74c4c36 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1642,18 +1642,18 @@
 			c = musb->dma_controller;
 
 			if (usb_pipeisoc(pipe)) {
-				int status = 0;
+				int d_status = 0;
 				struct usb_iso_packet_descriptor *d;
 
 				d = urb->iso_frame_desc + qh->iso_idx;
 
 				if (iso_err) {
-					status = -EILSEQ;
+					d_status = -EILSEQ;
 					urb->error_count++;
 				}
 				if (rx_count > d->length) {
-					if (status == 0) {
-						status = -EOVERFLOW;
+					if (d_status == 0) {
+						d_status = -EOVERFLOW;
 						urb->error_count++;
 					}
 					DBG(2, "** OVERFLOW %d into %d\n",\
@@ -1662,7 +1662,7 @@
 					length = d->length;
 				} else
 					length = rx_count;
-				d->status = status;
+				d->status = d_status;
 				buf = urb->transfer_dma + d->offset;
 			} else {
 				length = rx_count;
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index cc1d71b..473a94e 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -465,9 +465,9 @@
 	return 0;
 }
 
-static inline u16 musb_read_target_reg_base(u8 i, void __iomem *mbase)
+static inline void __iomem *musb_read_target_reg_base(u8 i, void __iomem *mbase)
 {
-	return 0;
+	return NULL;
 }
 
 static inline void musb_write_rxfunaddr(void __iomem *ep_target_regs,
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 5e83f96..a237550 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -259,6 +259,11 @@
 	if (!int_hsdma)
 		goto done;
 
+#ifdef CONFIG_BLACKFIN
+	/* Clear DMA interrupt flags */
+	musb_writeb(mbase, MUSB_HSDMA_INTR, int_hsdma);
+#endif
+
 	for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
 		if (int_hsdma & (1 << bchannel)) {
 			musb_channel = (struct musb_dma_channel *)
@@ -280,7 +285,7 @@
 				channel->actual_len = addr
 					- musb_channel->start_addr;
 
-				DBG(2, "ch %p, 0x%x -> 0x%x (%d / %d) %s\n",
+				DBG(2, "ch %p, 0x%x -> 0x%x (%zu / %d) %s\n",
 					channel, musb_channel->start_addr,
 					addr, channel->actual_len,
 					musb_channel->len,
@@ -324,11 +329,6 @@
 		}
 	}
 
-#ifdef CONFIG_BLACKFIN
-	/* Clear DMA interrup flags */
-	musb_writeb(mbase, MUSB_HSDMA_INTR, int_hsdma);
-#endif
-
 	retval = IRQ_HANDLED;
 done:
 	spin_unlock_irqrestore(&musb->lock, flags);
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 6761d20..83beeac 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -315,7 +315,7 @@
 	musb_platform_suspend(musb);
 
 	clk_put(musb->clock);
-	musb->clock = 0;
+	musb->clock = NULL;
 
 	return 0;
 }
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index aa884d0..de56b3d 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -41,6 +41,15 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called isp1301_omap.
 
+config USB_ULPI
+	bool "Generic ULPI Transceiver Driver"
+	depends on ARM
+	help
+	  Enable this to support ULPI-connected USB OTG transceivers, which
+	  are commonly found on embedded boards.
+
+	  The only chip currently supported is NXP's ISP1504.
+
 config TWL4030_USB
 	tristate "TWL4030 USB Transceiver Driver"
 	depends on TWL4030_CORE && REGULATOR_TWL4030
diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
index 2081678..aeb49a8 100644
--- a/drivers/usb/otg/Makefile
+++ b/drivers/usb/otg/Makefile
@@ -10,6 +10,7 @@
 obj-$(CONFIG_ISP1301_OMAP)	+= isp1301_omap.o
 obj-$(CONFIG_TWL4030_USB)	+= twl4030-usb.o
 obj-$(CONFIG_NOP_USB_XCEIV)	+= nop-usb-xceiv.o
+obj-$(CONFIG_USB_ULPI)		+= ulpi.o
 
 ccflags-$(CONFIG_USB_DEBUG)	+= -DDEBUG
 ccflags-$(CONFIG_USB_GADGET_DEBUG) += -DDEBUG
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index 9e3e7a5..bd9883f 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -598,12 +598,12 @@
 		 * USB_LINK_VBUS state.  musb_hdrc won't care until it
 		 * starts to handle softconnect right.
 		 */
-		twl4030charger_usb_en(status == USB_LINK_VBUS);
-
 		if (status == USB_LINK_NONE)
 			twl4030_phy_suspend(twl, 0);
 		else
 			twl4030_phy_resume(twl);
+
+		twl4030charger_usb_en(status == USB_LINK_VBUS);
 	}
 	sysfs_notify(&twl->dev->kobj, NULL, "vbus");
 
diff --git a/drivers/usb/otg/ulpi.c b/drivers/usb/otg/ulpi.c
new file mode 100644
index 0000000..8965274
--- /dev/null
+++ b/drivers/usb/otg/ulpi.c
@@ -0,0 +1,136 @@
+/*
+ * Generic ULPI USB transceiver support
+ *
+ * Copyright (C) 2009 Daniel Mack <daniel@caiaq.de>
+ *
+ * Based on sources from
+ *
+ *   Sascha Hauer <s.hauer@pengutronix.de>
+ *   Freescale Semiconductors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/usb.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/ulpi.h>
+
+/* ULPI register addresses */
+#define ULPI_VID_LOW         0x00    /* Vendor ID low */
+#define ULPI_VID_HIGH        0x01    /* Vendor ID high */
+#define ULPI_PID_LOW         0x02    /* Product ID low */
+#define ULPI_PID_HIGH        0x03    /* Product ID high */
+#define ULPI_ITFCTL          0x07    /* Interface Control */
+#define ULPI_OTGCTL          0x0A    /* OTG Control */
+
+/* add to above register address to access Set/Clear functions */
+#define ULPI_REG_SET         0x01
+#define ULPI_REG_CLEAR       0x02
+
+/* ULPI OTG Control Register bits */
+#define ID_PULL_UP              (1 << 0)        /* enable ID Pull Up */
+#define DP_PULL_DOWN            (1 << 1)        /* enable DP Pull Down */
+#define DM_PULL_DOWN            (1 << 2)        /* enable DM Pull Down */
+#define DISCHRG_VBUS            (1 << 3)        /* Discharge Vbus */
+#define CHRG_VBUS               (1 << 4)        /* Charge Vbus */
+#define DRV_VBUS                (1 << 5)        /* Drive Vbus */
+#define DRV_VBUS_EXT            (1 << 6)        /* Drive Vbus external */
+#define USE_EXT_VBUS_IND        (1 << 7)        /* Use ext. Vbus indicator */
+
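
The ULPI convention of register+1 and register+2 as write-only "set" and "clear" views is what ulpi_set_flags() below relies on; a sketch of the idiom (hypothetical helper, using this file's accessors):

static int example_pulse_vbus(struct otg_transceiver *otg)
{
	int ret;

	/* Assert DRV_VBUS without disturbing the other OTGCTL bits... */
	ret = otg_io_write(otg, DRV_VBUS, ULPI_OTGCTL + ULPI_REG_SET);
	if (ret)
		return ret;
	/* ...and later deassert only that bit again. */
	return otg_io_write(otg, DRV_VBUS, ULPI_OTGCTL + ULPI_REG_CLEAR);
}
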
+#define ULPI_ID(vendor, product) (((vendor) << 16) | (product))
+
+#define TR_FLAG(flags, a, b)	(((flags) & (a)) ? (b) : 0)
+
+/* ULPI hardcoded IDs, used for probing */
+static unsigned int ulpi_ids[] = {
+	ULPI_ID(0x04cc, 0x1504),	/* NXP ISP1504 */
+};
+
+static int ulpi_set_flags(struct otg_transceiver *otg)
+{
+	unsigned int flags = 0;
+
+	if (otg->flags & USB_OTG_PULLUP_ID)
+		flags |= ID_PULL_UP;
+
+	if (otg->flags & USB_OTG_PULLDOWN_DM)
+		flags |= DM_PULL_DOWN;
+
+	if (otg->flags & USB_OTG_PULLDOWN_DP)
+		flags |= DP_PULL_DOWN;
+
+	if (otg->flags & USB_OTG_EXT_VBUS_INDICATOR)
+		flags |= USE_EXT_VBUS_IND;
+
+	return otg_io_write(otg, flags, ULPI_OTGCTL + ULPI_REG_SET);
+}
+
+static int ulpi_init(struct otg_transceiver *otg)
+{
+	int i, vid, pid;
+
+	vid = (otg_io_read(otg, ULPI_VID_HIGH) << 8) |
+	       otg_io_read(otg, ULPI_VID_LOW);
+	pid = (otg_io_read(otg, ULPI_PID_HIGH) << 8) |
+	       otg_io_read(otg, ULPI_PID_LOW);
+
+	pr_info("ULPI transceiver vendor/product ID 0x%04x/0x%04x\n", vid, pid);
+
+	for (i = 0; i < ARRAY_SIZE(ulpi_ids); i++)
+		if (ulpi_ids[i] == ULPI_ID(vid, pid))
+			return ulpi_set_flags(otg);
+
+	pr_err("ULPI ID does not match any known transceiver.\n");
+	return -ENODEV;
+}
+
+static int ulpi_set_vbus(struct otg_transceiver *otg, bool on)
+{
+	unsigned int flags = otg_io_read(otg, ULPI_OTGCTL);
+
+	flags &= ~(DRV_VBUS | DRV_VBUS_EXT);
+
+	if (on) {
+		if (otg->flags & USB_OTG_DRV_VBUS)
+			flags |= DRV_VBUS;
+
+		if (otg->flags & USB_OTG_DRV_VBUS_EXT)
+			flags |= DRV_VBUS_EXT;
+	}
+
+	return otg_io_write(otg, flags, ULPI_OTGCTL + ULPI_REG_SET);
+}
+
+struct otg_transceiver *
+otg_ulpi_create(struct otg_io_access_ops *ops,
+		unsigned int flags)
+{
+	struct otg_transceiver *otg;
+
+	otg = kzalloc(sizeof(*otg), GFP_KERNEL);
+	if (!otg)
+		return NULL;
+
+	otg->label	= "ULPI";
+	otg->flags	= flags;
+	otg->io_ops	= ops;
+	otg->init	= ulpi_init;
+	otg->set_vbus	= ulpi_set_vbus;
+
+	return otg;
+}
+EXPORT_SYMBOL_GPL(otg_ulpi_create);
+
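A sketch of the intended board-level usage (the my_viewport_* accessors are hypothetical placeholders for platform-specific register I/O; the rest follows the otg_io_access_ops layout implied by otg_io_read()/otg_io_write()):

static int board_ulpi_read(struct otg_transceiver *otg, u32 reg)
{
	return my_viewport_read(reg);		/* platform-specific, assumed */
}

static int board_ulpi_write(struct otg_transceiver *otg, u32 val, u32 reg)
{
	return my_viewport_write(val, reg);	/* platform-specific, assumed */
}

static struct otg_io_access_ops board_ulpi_ops = {
	.read	= board_ulpi_read,
	.write	= board_ulpi_write,
};

static int __init board_otg_init(void)
{
	struct otg_transceiver *otg;

	otg = otg_ulpi_create(&board_ulpi_ops,
			      USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
	if (!otg)
		return -ENOMEM;
	return otg_set_transceiver(otg);
}
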
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 131e61a..a9c2dec 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -1,4 +1,6 @@
 /*
+ * Copyright (C) 2009 by Bart Hartgers (bart.hartgers+ark3116@gmail.com)
+ * Original version:
  * Copyright (C) 2006
  *   Simon Schulz (ark3116_driver <at> auctionant.de)
  *
@@ -6,10 +8,13 @@
  * - implements a driver for the arkmicro ark3116 chipset (vendor=0x6547,
  *   productid=0x0232) (used in a datacable called KQ-U8A)
  *
- * - based on code by krisfx -> thanks !!
- *   (see http://www.linuxquestions.org/questions/showthread.php?p=2184457#post2184457)
+ * Supports full modem status lines, break, and hardware flow control. Does
+ * not support software flow control, since I do not know how to enable it
+ * in the hardware.
  *
- *  - based on logs created by usbsnoopy
+ * This driver is essentially a new implementation. I initially dug
+ * into the old ark3116.c driver and suddenly realized the ark3116 is
+ * a 16450 with a USB interface glued to it. See comments at the
+ * bottom of this file.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -19,15 +24,31 @@
 
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/ioctl.h>
 #include <linux/tty.h>
+#include <linux/tty_flip.h>
 #include <linux/module.h>
 #include <linux/usb.h>
 #include <linux/usb/serial.h>
 #include <linux/serial.h>
+#include <linux/serial_reg.h>
 #include <linux/uaccess.h>
-
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
 
 static int debug;
+/*
+ * Version information
+ */
+
+#define DRIVER_VERSION "v0.5"
+#define DRIVER_AUTHOR "Bart Hartgers <bart.hartgers+ark3116@gmail.com>"
+#define DRIVER_DESC "USB ARK3116 serial/IrDA driver"
+#define DRIVER_DEV_DESC "ARK3116 RS232/IrDA"
+#define DRIVER_NAME "ark3116"
+
+/* usb timeout of 1 second */
+#define ARK_TIMEOUT (1*HZ)
 
 static struct usb_device_id id_table [] = {
 	{ USB_DEVICE(0x6547, 0x0232) },
@@ -45,118 +66,152 @@
 	return 0;
 }
 
-static inline void ARK3116_SND(struct usb_serial *serial, int seq,
-			       __u8 request, __u8 requesttype,
-			       __u16 value, __u16 index)
+struct ark3116_private {
+	wait_queue_head_t       delta_msr_wait;
+	struct async_icount	icount;
+	int			irda;	/* 1 for irda device */
+
+	/* protects hw register updates */
+	struct mutex		hw_lock;
+
+	int			quot;	/* baudrate divisor */
+	__u32			lcr;	/* line control register value */
+	__u32			hcr;	/* handshake control register (0x8)
+					 * value */
+	__u32			mcr;	/* modem control register value */
+
+	/* protects the status values below */
+	spinlock_t		status_lock;
+	__u32			msr;	/* modem status register value */
+	__u32			lsr;	/* line status register value */
+};
+
+static int ark3116_write_reg(struct usb_serial *serial,
+			     unsigned reg, __u8 val)
 {
 	int result;
+	/* 0xfe 0x40 are magic values taken from the original driver */
 	result = usb_control_msg(serial->dev,
 				 usb_sndctrlpipe(serial->dev, 0),
-				 request, requesttype, value, index,
-				 NULL, 0x00, 1000);
-	dbg("%03d > ok", seq);
+				 0xfe, 0x40, val, reg,
+				 NULL, 0, ARK_TIMEOUT);
+	return result;
 }
 
-static inline void ARK3116_RCV(struct usb_serial *serial, int seq,
-			       __u8 request, __u8 requesttype,
-			       __u16 value, __u16 index, __u8 expected,
-			       char *buf)
+static int ark3116_read_reg(struct usb_serial *serial,
+			    unsigned reg, unsigned char *buf)
 {
 	int result;
+	/* 0xfe 0xc0 are magic values taken from the original driver */
 	result = usb_control_msg(serial->dev,
 				 usb_rcvctrlpipe(serial->dev, 0),
-				 request, requesttype, value, index,
-				 buf, 0x0000001, 1000);
-	if (result)
-		dbg("%03d < %d bytes [0x%02X]", seq, result,
-		    ((unsigned char *)buf)[0]);
+				 0xfe, 0xc0, 0, reg,
+				 buf, 1, ARK_TIMEOUT);
+	if (result < 0)
+		return result;
 	else
-		dbg("%03d < 0 bytes", seq);
+		return buf[0];
 }
 
-static inline void ARK3116_RCV_QUIET(struct usb_serial *serial,
-				     __u8 request, __u8 requesttype,
-				     __u16 value, __u16 index, char *buf)
+static inline int calc_divisor(int bps)
 {
-	usb_control_msg(serial->dev,
-			usb_rcvctrlpipe(serial->dev, 0),
-			request, requesttype, value, index,
-			buf, 0x0000001, 1000);
+	/* The original ark3116 driver made some exceptions in rounding here
+	 * because windows did the same. Assume that is not really
+	 * necessary.
+	 * Crystal is 12MHz, probably because of USB, but we divide by 4?
+	 */
+	return (12000000 + 2*bps) / (4*bps);
 }
 
 static int ark3116_attach(struct usb_serial *serial)
 {
-	char *buf;
+	struct usb_serial_port *port = serial->port[0];
+	struct ark3116_private *priv;
 
-	buf = kmalloc(1, GFP_KERNEL);
-	if (!buf) {
-		dbg("error kmalloc -> out of mem?");
+	/* make sure we have our end-points */
+	if ((serial->num_bulk_in == 0) ||
+	    (serial->num_bulk_out == 0) ||
+	    (serial->num_interrupt_in == 0)) {
+		dev_err(&serial->dev->dev,
+			"%s - missing endpoint - "
+			"bulk in: %d, bulk out: %d, int in %d\n",
+			KBUILD_MODNAME,
+			serial->num_bulk_in,
+			serial->num_bulk_out,
+			serial->num_interrupt_in);
+		return -EINVAL;
+	}
+
+	priv = kzalloc(sizeof(struct ark3116_private),
+		       GFP_KERNEL);
+	if (!priv)
 		return -ENOMEM;
+
+	init_waitqueue_head(&priv->delta_msr_wait);
+	mutex_init(&priv->hw_lock);
+	spin_lock_init(&priv->status_lock);
+
+	priv->irda = is_irda(serial);
+
+	usb_set_serial_port_data(port, priv);
+
+	/* setup the hardware */
+	ark3116_write_reg(serial, UART_IER, 0);
+	/* disable DMA */
+	ark3116_write_reg(serial, UART_FCR, 0);
+	/* handshake control */
+	priv->hcr = 0;
+	ark3116_write_reg(serial, 0x8, 0);
+	/* modem control */
+	priv->mcr = 0;
+	ark3116_write_reg(serial, UART_MCR, 0);
+
+	if (!priv->irda) {
+		ark3116_write_reg(serial, 0xb, 0);
+	} else {
+		ark3116_write_reg(serial, 0xb, 1);
+		ark3116_write_reg(serial, 0xc, 0);
+		ark3116_write_reg(serial, 0xd, 0x41);
+		ark3116_write_reg(serial, 0xa, 1);
 	}
 
-	if (is_irda(serial))
-		dbg("IrDA mode");
+	/* setup baudrate */
+	ark3116_write_reg(serial, UART_LCR, UART_LCR_DLAB);
 
-	/* 3 */
-	ARK3116_SND(serial, 3, 0xFE, 0x40, 0x0008, 0x0002);
-	ARK3116_SND(serial, 4, 0xFE, 0x40, 0x0008, 0x0001);
-	ARK3116_SND(serial, 5, 0xFE, 0x40, 0x0000, 0x0008);
-	ARK3116_SND(serial, 6, 0xFE, 0x40, is_irda(serial) ? 0x0001 : 0x0000,
-		    0x000B);
+	/* setup for 9600 8N1 */
+	priv->quot = calc_divisor(9600);
+	ark3116_write_reg(serial, UART_DLL, priv->quot & 0xff);
+	ark3116_write_reg(serial, UART_DLM, (priv->quot>>8) & 0xff);
 
-	if (is_irda(serial)) {
-		ARK3116_SND(serial, 1001, 0xFE, 0x40, 0x0000, 0x000C);
-		ARK3116_SND(serial, 1002, 0xFE, 0x40, 0x0041, 0x000D);
-		ARK3116_SND(serial, 1003, 0xFE, 0x40, 0x0001, 0x000A);
-	}
+	priv->lcr = UART_LCR_WLEN8;
+	ark3116_write_reg(serial, UART_LCR, UART_LCR_WLEN8);
 
-	/* <-- seq7 */
-	ARK3116_RCV(serial,  7, 0xFE, 0xC0, 0x0000, 0x0003, 0x00, buf);
-	ARK3116_SND(serial,  8, 0xFE, 0x40, 0x0080, 0x0003);
-	ARK3116_SND(serial,  9, 0xFE, 0x40, 0x001A, 0x0000);
-	ARK3116_SND(serial, 10, 0xFE, 0x40, 0x0000, 0x0001);
-	ARK3116_SND(serial, 11, 0xFE, 0x40, 0x0000, 0x0003);
+	ark3116_write_reg(serial, 0xe, 0);
 
-	/* <-- seq12 */
-	ARK3116_RCV(serial, 12, 0xFE, 0xC0, 0x0000, 0x0004, 0x00, buf);
-	ARK3116_SND(serial, 13, 0xFE, 0x40, 0x0000, 0x0004);
+	if (priv->irda)
+		ark3116_write_reg(serial, 0x9, 0);
 
-	/* 14 */
-	ARK3116_RCV(serial, 14, 0xFE, 0xC0, 0x0000, 0x0004, 0x00, buf);
-	ARK3116_SND(serial, 15, 0xFE, 0x40, 0x0000, 0x0004);
-
-	/* 16 */
-	ARK3116_RCV(serial, 16, 0xFE, 0xC0, 0x0000, 0x0004, 0x00, buf);
-	/* --> seq17 */
-	ARK3116_SND(serial, 17, 0xFE, 0x40, 0x0001, 0x0004);
-
-	/* <-- seq18 */
-	ARK3116_RCV(serial, 18, 0xFE, 0xC0, 0x0000, 0x0004, 0x01, buf);
-
-	/* --> seq19 */
-	ARK3116_SND(serial, 19, 0xFE, 0x40, 0x0003, 0x0004);
-
-	/* <-- seq20 */
-	/* seems like serial port status info (RTS, CTS, ...) */
-	/* returns modem control line status?! */
-	ARK3116_RCV(serial, 20, 0xFE, 0xC0, 0x0000, 0x0006, 0xFF, buf);
-
-	/* set 9600 baud & do some init?! */
-	ARK3116_SND(serial, 147, 0xFE, 0x40, 0x0083, 0x0003);
-	ARK3116_SND(serial, 148, 0xFE, 0x40, 0x0038, 0x0000);
-	ARK3116_SND(serial, 149, 0xFE, 0x40, 0x0001, 0x0001);
-	if (is_irda(serial))
-		ARK3116_SND(serial, 1004, 0xFE, 0x40, 0x0000, 0x0009);
-	ARK3116_SND(serial, 150, 0xFE, 0x40, 0x0003, 0x0003);
-	ARK3116_RCV(serial, 151, 0xFE, 0xC0, 0x0000, 0x0004, 0x03, buf);
-	ARK3116_SND(serial, 152, 0xFE, 0x40, 0x0000, 0x0003);
-	ARK3116_RCV(serial, 153, 0xFE, 0xC0, 0x0000, 0x0003, 0x00, buf);
-	ARK3116_SND(serial, 154, 0xFE, 0x40, 0x0003, 0x0003);
-
-	kfree(buf);
+	dev_info(&serial->dev->dev,
+		"%s using %s mode\n",
+		KBUILD_MODNAME,
+		priv->irda ? "IrDA" : "RS232");
 	return 0;
 }
 
+static void ark3116_release(struct usb_serial *serial)
+{
+	struct usb_serial_port *port = serial->port[0];
+	struct ark3116_private *priv = usb_get_serial_port_data(port);
+
+	/* device is closed, so URBs and DMA should be down */
+
+	usb_set_serial_port_data(port, NULL);
+
+	mutex_destroy(&priv->hw_lock);
+
+	kfree(priv);
+}
+
 static void ark3116_init_termios(struct tty_struct *tty)
 {
 	struct ktermios *termios = tty->termios;
@@ -172,200 +227,189 @@
 				struct ktermios *old_termios)
 {
 	struct usb_serial *serial = port->serial;
+	struct ark3116_private *priv = usb_get_serial_port_data(port);
 	struct ktermios *termios = tty->termios;
 	unsigned int cflag = termios->c_cflag;
-	int baud;
-	int ark3116_baud;
-	char *buf;
-	char config;
+	int bps = tty_get_baud_rate(tty);
+	int quot;
+	__u8 lcr, hcr, eval;
 
-	config = 0;
-
-	dbg("%s - port %d", __func__, port->number);
-
-
-	cflag = termios->c_cflag;
-	termios->c_cflag &= ~(CMSPAR|CRTSCTS);
-
-	buf = kmalloc(1, GFP_KERNEL);
-	if (!buf) {
-		dbg("error kmalloc");
-		*termios = *old_termios;
-		return;
-	}
-
-	/* set data bit count (8/7/6/5) */
-	if (cflag & CSIZE) {
-		switch (cflag & CSIZE) {
-		case CS5:
-			config |= 0x00;
-			dbg("setting CS5");
-			break;
-		case CS6:
-			config |= 0x01;
-			dbg("setting CS6");
-			break;
-		case CS7:
-			config |= 0x02;
-			dbg("setting CS7");
-			break;
-		default:
-			dbg("CSIZE was set but not CS5-CS8, using CS8!");
-			/* fall through */
-		case CS8:
-			config |= 0x03;
-			dbg("setting CS8");
-			break;
-		}
-	}
-
-	/* set parity (NONE/EVEN/ODD) */
-	if (cflag & PARENB) {
-		if (cflag & PARODD) {
-			config |= 0x08;
-			dbg("setting parity to ODD");
-		} else {
-			config |= 0x18;
-			dbg("setting parity to EVEN");
-		}
-	} else {
-		dbg("setting parity to NONE");
-	}
-
-	/* set stop bit (1/2) */
-	if (cflag & CSTOPB) {
-		config |= 0x04;
-		dbg("setting 2 stop bits");
-	} else {
-		dbg("setting 1 stop bit");
-	}
-
-	/* set baudrate */
-	baud = tty_get_baud_rate(tty);
-
-	switch (baud) {
-	case 75:
-	case 150:
-	case 300:
-	case 600:
-	case 1200:
-	case 1800:
-	case 2400:
-	case 4800:
-	case 9600:
-	case 19200:
-	case 38400:
-	case 57600:
-	case 115200:
-	case 230400:
-	case 460800:
-		/* Report the resulting rate back to the caller */
-		tty_encode_baud_rate(tty, baud, baud);
+	/* set data bit count */
+	switch (cflag & CSIZE) {
+	case CS5:
+		lcr = UART_LCR_WLEN5;
 		break;
-	/* set 9600 as default (if given baudrate is invalid for example) */
+	case CS6:
+		lcr = UART_LCR_WLEN6;
+		break;
+	case CS7:
+		lcr = UART_LCR_WLEN7;
+		break;
 	default:
-		tty_encode_baud_rate(tty, 9600, 9600);
+	case CS8:
+		lcr = UART_LCR_WLEN8;
+		break;
+	}
+	if (cflag & CSTOPB)
+		lcr |= UART_LCR_STOP;
+	if (cflag & PARENB)
+		lcr |= UART_LCR_PARITY;
+	if (!(cflag & PARODD))
+		lcr |= UART_LCR_EPAR;
+#ifdef CMSPAR
+	if (cflag & CMSPAR)
+		lcr |= UART_LCR_SPAR;
+#endif
+	/* handshake control */
+	hcr = (cflag & CRTSCTS) ? 0x03 : 0x00;
+
+	/* calc baudrate */
+	dbg("%s - setting bps to %d", __func__, bps);
+	eval = 0;
+	switch (bps) {
 	case 0:
-		baud = 9600;
+		quot = calc_divisor(9600);
+		break;
+	default:
+		if ((bps < 75) || (bps > 3000000))
+			bps = 9600;
+		quot = calc_divisor(bps);
+		break;
+	case 460800:
+		eval = 1;
+		quot = calc_divisor(bps);
+		break;
+	case 921600:
+		eval = 2;
+		quot = calc_divisor(bps);
+		break;
 	}
 
-	/*
-	 * found by try'n'error, be careful, maybe there are other options
-	 * for multiplicator etc! (3.5 for example)
-	 */
-	if (baud == 460800)
-		/* strange, for 460800 the formula is wrong
-		 * if using round() then 9600baud is wrong) */
-		ark3116_baud = 7;
-	else
-		ark3116_baud = 3000000 / baud;
+	/* Update state: synchronize */
+	mutex_lock(&priv->hw_lock);
 
-	/* ? */
-	ARK3116_RCV(serial, 0, 0xFE, 0xC0, 0x0000, 0x0003, 0x03, buf);
+	/* keep old LCR_SBC bit */
+	lcr |= (priv->lcr & UART_LCR_SBC);
 
-	/* offset = buf[0]; */
-	/* offset = 0x03; */
-	/* dbg("using 0x%04X as target for 0x0003:", 0x0080 + offset); */
+	dbg("%s - setting hcr:0x%02x,lcr:0x%02x,quot:%d",
+	    __func__, hcr, lcr, quot);
 
-	/* set baudrate */
-	dbg("setting baudrate to %d (->reg=%d)", baud, ark3116_baud);
-	ARK3116_SND(serial, 147, 0xFE, 0x40, 0x0083, 0x0003);
-	ARK3116_SND(serial, 148, 0xFE, 0x40,
-			    (ark3116_baud & 0x00FF), 0x0000);
-	ARK3116_SND(serial, 149, 0xFE, 0x40,
-			    (ark3116_baud & 0xFF00) >> 8, 0x0001);
-	ARK3116_SND(serial, 150, 0xFE, 0x40, 0x0003, 0x0003);
+	/* handshake control */
+	if (priv->hcr != hcr) {
+		priv->hcr = hcr;
+		ark3116_write_reg(serial, 0x8, hcr);
+	}
 
-	/* ? */
-	ARK3116_RCV(serial, 151, 0xFE, 0xC0, 0x0000, 0x0004, 0x03, buf);
-	ARK3116_SND(serial, 152, 0xFE, 0x40, 0x0000, 0x0003);
+	/* baudrate */
+	if (priv->quot != quot) {
+		priv->quot = quot;
+		priv->lcr = lcr; /* need to write lcr anyway */
 
-	/* set data bit count, stop bit count & parity: */
-	dbg("updating bit count, stop bit or parity (cfg=0x%02X)", config);
-	ARK3116_RCV(serial, 153, 0xFE, 0xC0, 0x0000, 0x0003, 0x00, buf);
-	ARK3116_SND(serial, 154, 0xFE, 0x40, config, 0x0003);
+		/* disable DMA since transmit/receive is
+		 * shadowed by UART_DLL
+		 */
+		ark3116_write_reg(serial, UART_FCR, 0);
 
-	if (cflag & CRTSCTS)
-		dbg("CRTSCTS not supported by chipset?!");
+		ark3116_write_reg(serial, UART_LCR,
+				  lcr|UART_LCR_DLAB);
+		ark3116_write_reg(serial, UART_DLL, quot & 0xff);
+		ark3116_write_reg(serial, UART_DLM, (quot>>8) & 0xff);
 
-	/* TEST ARK3116_SND(154, 0xFE, 0x40, 0xFFFF, 0x0006); */
+		/* restore lcr */
+		ark3116_write_reg(serial, UART_LCR, lcr);
+		/* magic baudrate thingy: not sure what it does,
+		 * but windows does this as well.
+		 */
+		ark3116_write_reg(serial, 0xe, eval);
 
-	kfree(buf);
+		/* enable DMA */
+		ark3116_write_reg(serial, UART_FCR, UART_FCR_DMA_SELECT);
+	} else if (priv->lcr != lcr) {
+		priv->lcr = lcr;
+		ark3116_write_reg(serial, UART_LCR, lcr);
+	}
 
-	return;
+	mutex_unlock(&priv->hw_lock);
+
+	/* check for software flow control */
+	if (I_IXOFF(tty) || I_IXON(tty)) {
+		dev_warn(&serial->dev->dev,
+			 "%s: don't know how to do software flow control\n",
+			 KBUILD_MODNAME);
+	}
+
+	/* Don't rewrite B0 */
+	if (tty_termios_baud_rate(termios))
+		tty_termios_encode_baud_rate(termios, bps, bps);
+}
+
+static void ark3116_close(struct usb_serial_port *port)
+{
+	struct usb_serial *serial = port->serial;
+
+	if (serial->dev) {
+		/* disable DMA */
+		ark3116_write_reg(serial, UART_FCR, 0);
+
+		/* deactivate interrupts */
+		ark3116_write_reg(serial, UART_IER, 0);
+
+		/* shutdown any bulk reads that might be going on */
+		if (serial->num_bulk_out)
+			usb_kill_urb(port->write_urb);
+		if (serial->num_bulk_in)
+			usb_kill_urb(port->read_urb);
+		if (serial->num_interrupt_in)
+			usb_kill_urb(port->interrupt_in_urb);
+	}
 }
 
 static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port)
 {
-	struct ktermios tmp_termios;
+	struct ark3116_private *priv = usb_get_serial_port_data(port);
 	struct usb_serial *serial = port->serial;
-	char *buf;
-	int result = 0;
-
-	dbg("%s - port %d", __func__, port->number);
+	unsigned char *buf;
+	int result;
 
 	buf = kmalloc(1, GFP_KERNEL);
-	if (!buf) {
-		dbg("error kmalloc -> out of mem?");
+	if (buf == NULL)
 		return -ENOMEM;
-	}
 
 	result = usb_serial_generic_open(tty, port);
-	if (result)
+	if (result) {
+		dbg("%s - usb_serial_generic_open failed: %d",
+		    __func__, result);
 		goto err_out;
+	}
 
-	/* open */
-	ARK3116_RCV(serial, 111, 0xFE, 0xC0, 0x0000, 0x0003, 0x02, buf);
-
-	ARK3116_SND(serial, 112, 0xFE, 0x40, 0x0082, 0x0003);
-	ARK3116_SND(serial, 113, 0xFE, 0x40, 0x001A, 0x0000);
-	ARK3116_SND(serial, 114, 0xFE, 0x40, 0x0000, 0x0001);
-	ARK3116_SND(serial, 115, 0xFE, 0x40, 0x0002, 0x0003);
-
-	ARK3116_RCV(serial, 116, 0xFE, 0xC0, 0x0000, 0x0004, 0x03, buf);
-	ARK3116_SND(serial, 117, 0xFE, 0x40, 0x0002, 0x0004);
-
-	ARK3116_RCV(serial, 118, 0xFE, 0xC0, 0x0000, 0x0004, 0x02, buf);
-	ARK3116_SND(serial, 119, 0xFE, 0x40, 0x0000, 0x0004);
-
-	ARK3116_RCV(serial, 120, 0xFE, 0xC0, 0x0000, 0x0004, 0x00, buf);
-
-	ARK3116_SND(serial, 121, 0xFE, 0x40, 0x0001, 0x0004);
-
-	ARK3116_RCV(serial, 122, 0xFE, 0xC0, 0x0000, 0x0004, 0x01, buf);
-
-	ARK3116_SND(serial, 123, 0xFE, 0x40, 0x0003, 0x0004);
-
-	/* returns different values (control lines?!) */
-	ARK3116_RCV(serial, 124, 0xFE, 0xC0, 0x0000, 0x0006, 0xFF, buf);
-
-	/* initialise termios */
+	/* setup termios */
 	if (tty)
-		ark3116_set_termios(tty, port, &tmp_termios);
+		ark3116_set_termios(tty, port, NULL);
+
+	/* remove any data still left: also clears error state */
+	ark3116_read_reg(serial, UART_RX, buf);
+
+	/* read modem status */
+	priv->msr = ark3116_read_reg(serial, UART_MSR, buf);
+	/* read line status */
+	priv->lsr = ark3116_read_reg(serial, UART_LSR, buf);
+
+	result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
+	if (result) {
+		dev_err(&port->dev, "submit irq_in urb failed %d\n",
+			result);
+		ark3116_close(port);
+		goto err_out;
+	}
+
+	/* activate interrupts */
+	ark3116_write_reg(port->serial, UART_IER, UART_IER_MSI|UART_IER_RLSI);
+
+	/* enable DMA */
+	ark3116_write_reg(port->serial, UART_FCR, UART_FCR_DMA_SELECT);
 
 err_out:
 	kfree(buf);
-
 	return result;
 }
 
@@ -373,6 +417,7 @@
 			 unsigned int cmd, unsigned long arg)
 {
 	struct usb_serial_port *port = tty->driver_data;
+	struct ark3116_private *priv = usb_get_serial_port_data(port);
 	struct serial_struct serstruct;
 	void __user *user_arg = (void __user *)arg;
 
@@ -394,9 +439,48 @@
 		if (copy_from_user(&serstruct, user_arg, sizeof(serstruct)))
 			return -EFAULT;
 		return 0;
-	default:
-		dbg("%s cmd 0x%04x not supported", __func__, cmd);
+	case TIOCMIWAIT:
+		for (;;) {
+			struct async_icount prev = priv->icount;
+			interruptible_sleep_on(&priv->delta_msr_wait);
+			/* see if a signal did it */
+			if (signal_pending(current))
+				return -ERESTARTSYS;
+			if ((prev.rng == priv->icount.rng) &&
+			    (prev.dsr == priv->icount.dsr) &&
+			    (prev.dcd == priv->icount.dcd) &&
+			    (prev.cts == priv->icount.cts))
+				return -EIO;
+			if ((arg & TIOCM_RNG &&
+			     (prev.rng != priv->icount.rng)) ||
+			    (arg & TIOCM_DSR &&
+			     (prev.dsr != priv->icount.dsr)) ||
+			    (arg & TIOCM_CD  &&
+			     (prev.dcd != priv->icount.dcd)) ||
+			    (arg & TIOCM_CTS &&
+			     (prev.cts != priv->icount.cts)))
+				return 0;
+		}
 		break;
+	case TIOCGICOUNT: {
+		struct serial_icounter_struct icount;
+		struct async_icount cnow = priv->icount;
+		memset(&icount, 0, sizeof(icount));
+		icount.cts = cnow.cts;
+		icount.dsr = cnow.dsr;
+		icount.rng = cnow.rng;
+		icount.dcd = cnow.dcd;
+		icount.rx = cnow.rx;
+		icount.tx = cnow.tx;
+		icount.frame = cnow.frame;
+		icount.overrun = cnow.overrun;
+		icount.parity = cnow.parity;
+		icount.brk = cnow.brk;
+		icount.buf_overrun = cnow.buf_overrun;
+		if (copy_to_user(user_arg, &icount, sizeof(icount)))
+			return -EFAULT;
+		return 0;
+	}
 	}
 
 	return -ENOIOCTLCMD;
@@ -405,32 +489,273 @@
 static int ark3116_tiocmget(struct tty_struct *tty, struct file *file)
 {
 	struct usb_serial_port *port = tty->driver_data;
-	struct usb_serial *serial = port->serial;
-	char *buf;
-	char temp;
+	struct ark3116_private *priv = usb_get_serial_port_data(port);
+	__u32 status;
+	__u32 ctrl;
+	unsigned long flags;
 
-	/* seems like serial port status info (RTS, CTS, ...) is stored
-	 * in reg(?) 0x0006
-	 * pcb connection point 11 = GND -> sets bit4 of response
-	 * pcb connection point  7 = GND -> sets bit6 of response
+	mutex_lock(&priv->hw_lock);
+	ctrl = priv->mcr;
+	mutex_unlock(&priv->hw_lock);
+
+	spin_lock_irqsave(&priv->status_lock, flags);
+	status = priv->msr;
+	spin_unlock_irqrestore(&priv->status_lock, flags);
+
+	return  (status & UART_MSR_DSR  ? TIOCM_DSR  : 0) |
+		(status & UART_MSR_CTS  ? TIOCM_CTS  : 0) |
+		(status & UART_MSR_RI   ? TIOCM_RI   : 0) |
+		(status & UART_MSR_DCD  ? TIOCM_CD   : 0) |
+		(ctrl   & UART_MCR_DTR  ? TIOCM_DTR  : 0) |
+		(ctrl   & UART_MCR_RTS  ? TIOCM_RTS  : 0) |
+		(ctrl   & UART_MCR_OUT1 ? TIOCM_OUT1 : 0) |
+		(ctrl   & UART_MCR_OUT2 ? TIOCM_OUT2 : 0);
+}
+
+static int ark3116_tiocmset(struct tty_struct *tty, struct file *file,
+			unsigned set, unsigned clr)
+{
+	struct usb_serial_port *port = tty->driver_data;
+	struct ark3116_private *priv = usb_get_serial_port_data(port);
+
+	/* we need to take the mutex here, to make sure that the value
+	 * in priv->mcr is actually the one that is in the hardware
 	 */
 
-	buf = kmalloc(1, GFP_KERNEL);
-	if (!buf) {
-		dbg("error kmalloc");
-		return -ENOMEM;
+	mutex_lock(&priv->hw_lock);
+
+	if (set & TIOCM_RTS)
+		priv->mcr |= UART_MCR_RTS;
+	if (set & TIOCM_DTR)
+		priv->mcr |= UART_MCR_DTR;
+	if (set & TIOCM_OUT1)
+		priv->mcr |= UART_MCR_OUT1;
+	if (set & TIOCM_OUT2)
+		priv->mcr |= UART_MCR_OUT2;
+	if (clr & TIOCM_RTS)
+		priv->mcr &= ~UART_MCR_RTS;
+	if (clr & TIOCM_DTR)
+		priv->mcr &= ~UART_MCR_DTR;
+	if (clr & TIOCM_OUT1)
+		priv->mcr &= ~UART_MCR_OUT1;
+	if (clr & TIOCM_OUT2)
+		priv->mcr &= ~UART_MCR_OUT2;
+
+	ark3116_write_reg(port->serial, UART_MCR, priv->mcr);
+
+	mutex_unlock(&priv->hw_lock);
+
+	return 0;
+}
+
+static void ark3116_break_ctl(struct tty_struct *tty, int break_state)
+{
+	struct usb_serial_port *port = tty->driver_data;
+	struct ark3116_private *priv = usb_get_serial_port_data(port);
+
+	/* LCR is also used for other things: protect access */
+	mutex_lock(&priv->hw_lock);
+
+	if (break_state)
+		priv->lcr |= UART_LCR_SBC;
+	else
+		priv->lcr &= ~UART_LCR_SBC;
+
+	ark3116_write_reg(port->serial, UART_LCR, priv->lcr);
+
+	mutex_unlock(&priv->hw_lock);
+}
+
+static void ark3116_update_msr(struct usb_serial_port *port, __u8 msr)
+{
+	struct ark3116_private *priv = usb_get_serial_port_data(port);
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->status_lock, flags);
+	priv->msr = msr;
+	spin_unlock_irqrestore(&priv->status_lock, flags);
+
+	if (msr & UART_MSR_ANY_DELTA) {
+		/* update input line counters */
+		if (msr & UART_MSR_DCTS)
+			priv->icount.cts++;
+		if (msr & UART_MSR_DDSR)
+			priv->icount.dsr++;
+		if (msr & UART_MSR_DDCD)
+			priv->icount.dcd++;
+		if (msr & UART_MSR_TERI)
+			priv->icount.rng++;
+		wake_up_interruptible(&priv->delta_msr_wait);
+	}
+}
+
+static void ark3116_update_lsr(struct usb_serial_port *port, __u8 lsr)
+{
+	struct ark3116_private *priv = usb_get_serial_port_data(port);
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->status_lock, flags);
+	/* combine bits */
+	priv->lsr |= lsr;
+	spin_unlock_irqrestore(&priv->status_lock, flags);
+
+	if (lsr&UART_LSR_BRK_ERROR_BITS) {
+		if (lsr & UART_LSR_BI)
+			priv->icount.brk++;
+		if (lsr & UART_LSR_FE)
+			priv->icount.frame++;
+		if (lsr & UART_LSR_PE)
+			priv->icount.parity++;
+		if (lsr & UART_LSR_OE)
+			priv->icount.overrun++;
+	}
+}
+
+static void ark3116_read_int_callback(struct urb *urb)
+{
+	struct usb_serial_port *port = urb->context;
+	int status = urb->status;
+	const __u8 *data = urb->transfer_buffer;
+	int result;
+
+	switch (status) {
+	case -ECONNRESET:
+	case -ENOENT:
+	case -ESHUTDOWN:
+		/* this urb is terminated, clean up */
+		dbg("%s - urb shutting down with status: %d",
+		    __func__, status);
+		return;
+	default:
+		dbg("%s - nonzero urb status received: %d",
+		    __func__, status);
+		break;
+	case 0: /* success */
+		/* discovered this by trial and error... */
+		if ((urb->actual_length == 4) && (data[0] == 0xe8)) {
+			const __u8 id = data[1]&UART_IIR_ID;
+			dbg("%s: iir=%02x", __func__, data[1]);
+			if (id == UART_IIR_MSI) {
+				dbg("%s: msr=%02x", __func__, data[3]);
+				ark3116_update_msr(port, data[3]);
+				break;
+			} else if (id == UART_IIR_RLSI) {
+				dbg("%s: lsr=%02x", __func__, data[2]);
+				ark3116_update_lsr(port, data[2]);
+				break;
+			}
+		}
+		/*
+		 * Not sure what this data meant...
+		 */
+		usb_serial_debug_data(debug, &port->dev,
+				      __func__,
+				      urb->actual_length,
+				      urb->transfer_buffer);
+		break;
 	}
 
-	/* read register */
-	ARK3116_RCV_QUIET(serial, 0xFE, 0xC0, 0x0000, 0x0006, buf);
-	temp = buf[0];
-	kfree(buf);
+	result = usb_submit_urb(urb, GFP_ATOMIC);
+	if (result)
+		dev_err(&urb->dev->dev,
+			"%s - Error %d submitting interrupt urb\n",
+			__func__, result);
+}
 
-	/* i do not really know if bit4=CTS and bit6=DSR... just a
-	 * quick guess!
-	 */
-	return (temp & (1<<4) ? TIOCM_CTS : 0)
-	       | (temp & (1<<6) ? TIOCM_DSR : 0);
+
+/* Data comes in via the bulk (data) URB, errors/interrupts via the int URB.
+ * This means that we cannot be sure which data byte has an associated error
+ * condition, so we report an error for all data in the next bulk read.
+ *
+ * Actually, there might even be a window between the bulk data leaving the
+ * ark and reading/resetting the lsr in the read_bulk_callback where an
+ * interrupt for the next data block could come in.
+ * Without some kind of ordering on the ark, we would have to report the
+ * error for the next block of data as well...
+ * For now, let's pretend this can't happen.
+ */
+
+static void send_to_tty(struct tty_struct *tty,
+			const unsigned char *chars,
+			size_t size, char flag)
+{
+	if (size == 0)
+		return;
+	if (flag == TTY_NORMAL) {
+		tty_insert_flip_string(tty, chars, size);
+	} else {
+		int i;
+		for (i = 0; i < size; ++i)
+			tty_insert_flip_char(tty, chars[i], flag);
+	}
+}
+
+static void ark3116_read_bulk_callback(struct urb *urb)
+{
+	struct usb_serial_port *port =  urb->context;
+	struct ark3116_private *priv = usb_get_serial_port_data(port);
+	const __u8 *data = urb->transfer_buffer;
+	int status = urb->status;
+	struct tty_struct *tty;
+	unsigned long flags;
+	int result;
+	char flag;
+	__u32 lsr;
+
+	switch (status) {
+	case -ECONNRESET:
+	case -ENOENT:
+	case -ESHUTDOWN:
+		/* this urb is terminated, clean up */
+		dbg("%s - urb shutting down with status: %d",
+		    __func__, status);
+		return;
+	default:
+		dbg("%s - nonzero urb status received: %d",
+		    __func__, status);
+		break;
+	case 0: /* success */
+
+		spin_lock_irqsave(&priv->status_lock, flags);
+		lsr = priv->lsr;
+		/* clear error bits */
+		priv->lsr &= ~UART_LSR_BRK_ERROR_BITS;
+		spin_unlock_irqrestore(&priv->status_lock, flags);
+
+		if (unlikely(lsr & UART_LSR_BI))
+			flag = TTY_BREAK;
+		else if (unlikely(lsr & UART_LSR_PE))
+			flag = TTY_PARITY;
+		else if (unlikely(lsr & UART_LSR_FE))
+			flag = TTY_FRAME;
+		else
+			flag = TTY_NORMAL;
+
+		tty = tty_port_tty_get(&port->port);
+		if (tty) {
+			tty_buffer_request_room(tty, urb->actual_length + 1);
+			/* overrun is special, not associated with a char */
+			if (unlikely(lsr & UART_LSR_OE))
+				tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+			send_to_tty(tty, data, urb->actual_length, flag);
+			tty_flip_buffer_push(tty);
+			tty_kref_put(tty);
+		}
+
+		/* Throttle the device if requested by tty */
+		spin_lock_irqsave(&port->lock, flags);
+		port->throttled = port->throttle_req;
+		if (port->throttled) {
+			spin_unlock_irqrestore(&port->lock, flags);
+			return;
+		} else
+			spin_unlock_irqrestore(&port->lock, flags);
+	}
+	/* Continue reading from device */
+	result = usb_submit_urb(urb, GFP_ATOMIC);
+	if (result)
+		dev_err(&urb->dev->dev, "%s - failed resubmitting"
+			" read urb, error %d\n", __func__, result);
 }
 
 static struct usb_driver ark3116_driver = {
@@ -450,11 +775,17 @@
 	.usb_driver =		&ark3116_driver,
 	.num_ports =		1,
 	.attach =		ark3116_attach,
+	.release =		ark3116_release,
 	.set_termios =		ark3116_set_termios,
 	.init_termios =		ark3116_init_termios,
 	.ioctl =		ark3116_ioctl,
 	.tiocmget =		ark3116_tiocmget,
+	.tiocmset =		ark3116_tiocmset,
 	.open =			ark3116_open,
+	.close =		ark3116_close,
+	.break_ctl =		ark3116_break_ctl,
+	.read_int_callback =	ark3116_read_int_callback,
+	.read_bulk_callback =	ark3116_read_bulk_callback,
 };
 
 static int __init ark3116_init(void)
@@ -465,7 +796,12 @@
 	if (retval)
 		return retval;
 	retval = usb_register(&ark3116_driver);
-	if (retval)
+	if (retval == 0) {
+		printk(KERN_INFO "%s:"
+		       DRIVER_VERSION ":"
+		       DRIVER_DESC "\n",
+		       KBUILD_MODNAME);
+	} else
 		usb_serial_deregister(&ark3116_device);
 	return retval;
 }
@@ -480,6 +816,109 @@
 module_exit(ark3116_exit);
 MODULE_LICENSE("GPL");
 
-module_param(debug, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Debug enabled or not");
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
 
+module_param(debug, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Enable debug");
+
+/*
+ * The following describes what I learned from studying the old
+ * ark3116.c driver, disassembling the windows driver, and some lucky
+ * guesses. Since I do not have any datasheet or other
+ * documentation, inaccuracies are almost guaranteed.
+ *
+ * Some specs for the ARK3116 can be found here:
+ * http://web.archive.org/web/20060318000438/
+ *   www.arkmicro.com/en/products/view.php?id=10
+ * On that page, 2 GPIO pins are mentioned: I assume these are the
+ * OUT1 and OUT2 pins of the UART, so I added support for those
+ * through the MCR. Since the pins are not available on my hardware,
+ * I could not verify this.
+ * Also, it states there is "on-chip hardware flow control". I have
+ * discovered how to enable that. Unfortunately, I do not know how to
+ * enable XON/XOFF (software) flow control, which would need support
+ * from the chip as well to work. Because of the wording on the web
+ * page there is a real possibility the chip simply does not support
+ * software flow control.
+ *
+ * I got my ark3116 as part of a mobile phone adapter cable. On the
+ * PCB, the following numbered contacts are present:
+ *
+ *  1:- +5V
+ *  2:o DTR
+ *  3:i RX
+ *  4:i DCD
+ *  5:o RTS
+ *  6:o TX
+ *  7:i RI
+ *  8:i DSR
+ * 10:- 0V
+ * 11:i CTS
+ *
+ * On my chip, all signals seem to be 3.3V, but 5V tolerant. But that
+ * may be different for the one you have ;-).
+ *
+ * The windows driver limits the registers to 0-F, so I assume there
+ * are actually 16 present on the device.
+ *
+ * On a UART interrupt, 4 bytes of data come in on the interrupt
+ * endpoint. The bytes are 0xe8 IIR LSR MSR.
+ *
+ * The baudrate seems to be generated from the 12MHz crystal, using
+ * 4-times subsampling. So quot=12e6/(4*baud). Also see description
+ * of register E.
+ *
+ * Registers 0-7:
+ * These seem to be the same as for a regular 16450. The FCR is set
+ * to UART_FCR_DMA_SELECT (0x8), I guess to enable transfers between
+ * the UART and the USB bridge/DMA engine.
+ *
+ * Register 8:
+ * By trial and error, I found out that bit 0 enables hardware CTS,
+ * stopping TX when CTS is +5V. Bit 1 does the same for RTS, making
+ * RTS +5V when the 3116 cannot transfer the data to the USB bus
+ * (verified by disabling the reading URB). Note that as far as I can
+ * tell, the windows driver does NOT use this, so there might be some
+ * hardware bug or something.
+ *
+ * According to a patch provided here
+ * (http://lkml.org/lkml/2009/7/26/56), the ARK3116 can also be used
+ * as an IrDA dongle. Since I do not have such a thing, I could not
+ * investigate that aspect. However, I can speculate ;-).
+ *
+ * - IrDA encodes data differently than RS232. Most likely, one of
+ *   the bits in registers 9..E enables the IR ENDEC (encoder/decoder).
+ * - Depending on the IR transceiver, the input and output need to be
+ *   inverted, so there are probably bits for that as well.
+ * - IrDA is half-duplex, so there should be a bit for selecting that.
+ *
+ * This still leaves at least two registers unaccounted for. Perhaps
+ * the chip can do XON/XOFF or CRC in HW?
+ *
+ * Register 9:
+ * Set to 0x00 for IrDA, when the baudrate is initialised.
+ *
+ * Register A:
+ * Set to 0x01 for IrDA, at init.
+ *
+ * Register B:
+ * Set to 0x01 for IrDA, 0x00 for RS232, at init.
+ *
+ * Register C:
+ * Set to 0x00 for IrDA, at init.
+ *
+ * Register D:
+ * Set to 0x41 for IrDA, at init.
+ *
+ * Register E:
+ * Some kind of baudrate override. The windows driver seems to set
+ * this to 0x00 for normal baudrates, 0x01 for 460800, 0x02 for 921600.
+ * Since 460800 and 921600 cannot be obtained by dividing 3MHz by an integer,
+ * it could be some kind of subdivisor thingy.
+ * However, it does not seem to do anything: selecting 921600 (divisor 3,
+ * reg E=2), still gets 1 MHz. I also checked if registers 9, C or F would
+ * work, but they don't.
+ *
+ * Register F: unknown
+ */
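
The divisor formula in the comment block above is easy to sanity-check. The following standalone sketch (not part of the patch; the 12 MHz crystal and divide-by-4 sampling are taken from the comments above, everything else is illustrative) reproduces calc_divisor() and prints the rate each divisor really gives. It also shows why register E matters: at 460800 the best integer divisor is 7, which lands at roughly 428 kbps.

/* Standalone illustration, not part of the patch: reproduce the
 * driver's calc_divisor() and show the actual rate per divisor.
 */
#include <stdio.h>

static int calc_divisor(int bps)
{
	/* round to the nearest divisor: quot = 12e6 / (4 * bps) */
	return (12000000 + 2 * bps) / (4 * bps);
}

int main(void)
{
	static const int rates[] = { 9600, 19200, 115200, 460800, 921600 };
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		int quot = calc_divisor(rates[i]);

		printf("%7d bps -> quot %4d -> actual %7d bps\n",
		       rates[i], quot, 12000000 / (4 * quot));
	}
	return 0;
}

For 921600 this prints divisor 3 and an actual rate of 1 MHz, matching the register E observation above.
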
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index ebcc6d0..f99498f 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -598,6 +598,20 @@
 	{ USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
 	{ USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
 	{ USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) },
+	{ USB_DEVICE(BANDB_VID, BANDB_USOPTL4_PID) },
+	{ USB_DEVICE(BANDB_VID, BANDB_USPTL4_PID) },
+	{ USB_DEVICE(BANDB_VID, BANDB_USO9ML2DR_2_PID) },
+	{ USB_DEVICE(BANDB_VID, BANDB_USO9ML2DR_PID) },
+	{ USB_DEVICE(BANDB_VID, BANDB_USOPTL4DR2_PID) },
+	{ USB_DEVICE(BANDB_VID, BANDB_USOPTL4DR_PID) },
+	{ USB_DEVICE(BANDB_VID, BANDB_485USB9F_2W_PID) },
+	{ USB_DEVICE(BANDB_VID, BANDB_485USB9F_4W_PID) },
+	{ USB_DEVICE(BANDB_VID, BANDB_232USB9M_PID) },
+	{ USB_DEVICE(BANDB_VID, BANDB_485USBTB_2W_PID) },
+	{ USB_DEVICE(BANDB_VID, BANDB_485USBTB_4W_PID) },
+	{ USB_DEVICE(BANDB_VID, BANDB_TTL5USB9M_PID) },
+	{ USB_DEVICE(BANDB_VID, BANDB_TTL3USB9M_PID) },
+	{ USB_DEVICE(BANDB_VID, BANDB_ZZ_PROG1_USB_PID) },
 	{ USB_DEVICE(FTDI_VID, EVER_ECO_PRO_CDS) },
 	{ USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_1_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_2_PID) },
@@ -2195,15 +2209,21 @@
 
 	/* Set number of data bits, parity, stop bits */
 
-	termios->c_cflag &= ~CMSPAR;
-
 	urb_value = 0;
 	urb_value |= (cflag & CSTOPB ? FTDI_SIO_SET_DATA_STOP_BITS_2 :
 		      FTDI_SIO_SET_DATA_STOP_BITS_1);
-	urb_value |= (cflag & PARENB ?
-		      (cflag & PARODD ? FTDI_SIO_SET_DATA_PARITY_ODD :
-		       FTDI_SIO_SET_DATA_PARITY_EVEN) :
-		      FTDI_SIO_SET_DATA_PARITY_NONE);
+	if (cflag & PARENB) {
+		if (cflag & CMSPAR)
+			urb_value |= cflag & PARODD ?
+				     FTDI_SIO_SET_DATA_PARITY_MARK :
+				     FTDI_SIO_SET_DATA_PARITY_SPACE;
+		else
+			urb_value |= cflag & PARODD ?
+				     FTDI_SIO_SET_DATA_PARITY_ODD :
+				     FTDI_SIO_SET_DATA_PARITY_EVEN;
+	} else {
+		urb_value |= FTDI_SIO_SET_DATA_PARITY_NONE;
+	}
 	if (cflag & CSIZE) {
 		switch (cflag & CSIZE) {
 		case CS5: urb_value |= 5; dbg("Setting CS5"); break;
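
For reference, here is a hedged userspace sketch of how a program would request the mark/space parity this hunk starts honouring. With glibc, CMSPAR is exposed under _DEFAULT_SOURCE (an assumption worth checking for other libcs), and PARENB|CMSPAR selects space parity while adding PARODD selects mark parity; the device node is hypothetical.

/* Userspace sketch: request mark parity, which the hunk above now
 * maps to FTDI_SIO_SET_DATA_PARITY_MARK.
 */
#define _DEFAULT_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	struct termios tio;
	int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);	/* hypothetical node */

	if (fd < 0 || tcgetattr(fd, &tio) < 0) {
		perror("open/tcgetattr");
		return 1;
	}
	tio.c_cflag &= ~CSIZE;
	/* PARENB|CMSPAR|PARODD = mark parity; drop PARODD for space parity */
	tio.c_cflag |= CS8 | PARENB | PARODD | CMSPAR;
	if (tcsetattr(fd, TCSANOW, &tio) < 0) {
		perror("tcsetattr");
		return 1;
	}
	close(fd);
	return 0;
}
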
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 6f31e0d..4586a24 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -662,6 +662,20 @@
 #define BANDB_USOTL4_PID	0xAC01	/* USOTL4 Isolated RS-485 Converter */
 #define BANDB_USTL4_PID		0xAC02	/* USTL4 RS-485 Converter */
 #define BANDB_USO9ML2_PID	0xAC03	/* USO9ML2 Isolated RS-232 Converter */
+#define BANDB_USOPTL4_PID	0xAC11
+#define BANDB_USPTL4_PID	0xAC12
+#define BANDB_USO9ML2DR_2_PID	0xAC16
+#define BANDB_USO9ML2DR_PID	0xAC17
+#define BANDB_USOPTL4DR2_PID	0xAC18	/* USOPTL4R-2 2-port Isolated RS-232 Converter */
+#define BANDB_USOPTL4DR_PID	0xAC19
+#define BANDB_485USB9F_2W_PID	0xAC25
+#define BANDB_485USB9F_4W_PID	0xAC26
+#define BANDB_232USB9M_PID	0xAC27
+#define BANDB_485USBTB_2W_PID	0xAC33
+#define BANDB_485USBTB_4W_PID	0xAC34
+#define BANDB_TTL5USB9M_PID	0xAC49
+#define BANDB_TTL3USB9M_PID	0xAC50
+#define BANDB_ZZ_PROG1_USB_PID	0xBA02
 
 /*
  * RM Michaelides CANview USB (http://www.rmcan.com)
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index f11abf5..485fa9c 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -121,8 +121,14 @@
  * moschip_id_table_combined
  */
 #define USB_VENDOR_ID_BANDB             0x0856
-#define BANDB_DEVICE_ID_USOPTL4_4       0xAC44
+#define BANDB_DEVICE_ID_USO9ML2_2	0xAC22
+#define BANDB_DEVICE_ID_USO9ML2_4	0xAC24
+#define BANDB_DEVICE_ID_US9ML2_2	0xAC29
+#define BANDB_DEVICE_ID_US9ML2_4	0xAC30
+#define BANDB_DEVICE_ID_USPTL4_2	0xAC31
+#define BANDB_DEVICE_ID_USPTL4_4	0xAC32
 #define BANDB_DEVICE_ID_USOPTL4_2       0xAC42
+#define BANDB_DEVICE_ID_USOPTL4_4       0xAC44
 
 /* This driver also supports
  * ATEN UC2324 device using Moschip MCS7840
@@ -177,8 +183,14 @@
 static struct usb_device_id moschip_port_id_table[] = {
 	{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
 	{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
-	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
+	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
+	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
+	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
+	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
+	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
+	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
 	{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
 	{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
 	{}			/* terminating entry */
@@ -187,8 +199,14 @@
 static __devinitdata struct usb_device_id moschip_id_table_combined[] = {
 	{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
 	{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
-	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
+	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
+	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
+	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
+	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
+	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
+	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
 	{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
 	{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
 	{}			/* terminating entry */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 0577e4b..9a2b903 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -580,12 +580,48 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0144, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0145, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0146, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0148, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0149, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0150, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0151, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0154, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
@@ -599,6 +635,7 @@
 	{ USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) },
 	{ USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */
 	{ USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
+	{ USB_DEVICE(ALINK_VENDOR_ID, 0xce16) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) },
 	{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
@@ -1312,7 +1349,7 @@
 
 	dbg("%s entered", __func__);
 
-	if (serial->dev->auto_pm) {
+	if (message.event & PM_EVENT_AUTO) {
 		spin_lock_irq(&intfdata->susp_lock);
 		b = intfdata->in_flight;
 		spin_unlock_irq(&intfdata->susp_lock);
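
Both this hunk and the matching one in sierra.c below replace a read of serial->dev->auto_pm with a check of the suspend message itself. A minimal sketch of the resulting pattern (illustrative only; example_suspend and the busy counter are made up):

/* PM_EVENT_AUTO is set for runtime (auto) suspend and clear for
 * system sleep, so the callback can veto only the former.
 */
#include <linux/usb.h>

static int example_suspend(struct usb_interface *intf, pm_message_t message)
{
	int busy = 0;	/* stand-in for a real in-flight URB counter */

	/* refuse autosuspend while traffic is pending ... */
	if ((message.event & PM_EVENT_AUTO) && busy)
		return -EBUSY;
	/* ... but always allow system sleep */
	return 0;
}
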
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 5019325..ac1b644 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -16,8 +16,9 @@
   Portions based on the option driver by Matthias Urlichs <smurf@smurf.noris.de>
   Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org>
 */
-
-#define DRIVER_VERSION "v.1.3.8"
+/* Uncomment to log function calls */
+/* #define DEBUG */
+#define DRIVER_VERSION "v.1.7.16"
 #define DRIVER_AUTHOR "Kevin Lloyd, Elina Pasheva, Matthew Safar, Rory Filer"
 #define DRIVER_DESC "USB Driver for Sierra Wireless USB modems"
 
@@ -33,8 +34,10 @@
 #define SWIMS_USB_REQUEST_SetPower	0x00
 #define SWIMS_USB_REQUEST_SetNmea	0x07
 
-#define N_IN_URB	8
-#define N_OUT_URB	64
+#define N_IN_URB_HM	8
+#define N_OUT_URB_HM	64
+#define N_IN_URB	4
+#define N_OUT_URB	4
 #define IN_BUFLEN	4096
 
 #define MAX_TRANSFER		(PAGE_SIZE - 512)
@@ -124,6 +127,23 @@
 	return 0;
 }
 
+static int is_himemory(const u8 ifnum,
+				const struct sierra_iface_info *himemorylist)
+{
+	const u8  *info;
+	int i;
+
+	if (himemorylist) {
+		info = himemorylist->ifaceinfo;
+
+		for (i = 0; i < himemorylist->infolen; i++) {
+			if (info[i] == ifnum)
+				return 1;
+		}
+	}
+	return 0;
+}
+
 static int sierra_calc_interface(struct usb_serial *serial)
 {
 	int interface;
@@ -186,6 +206,20 @@
 	return result;
 }
 
+/* interfaces with higher memory requirements */
+static const u8 hi_memory_typeA_ifaces[] = { 0, 2 };
+static const struct sierra_iface_info typeA_interface_list = {
+	.infolen = ARRAY_SIZE(hi_memory_typeA_ifaces),
+	.ifaceinfo = hi_memory_typeA_ifaces,
+};
+
+static const u8 hi_memory_typeB_ifaces[] = { 3, 4, 5, 6 };
+static const struct sierra_iface_info typeB_interface_list = {
+	.infolen = ARRAY_SIZE(hi_memory_typeB_ifaces),
+	.ifaceinfo = hi_memory_typeB_ifaces,
+};
+
+/* 'blacklist' of interfaces not served by this driver */
 static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11 };
 static const struct sierra_iface_info direct_ip_interface_blacklist = {
 	.infolen = ARRAY_SIZE(direct_ip_non_serial_ifaces),
@@ -286,8 +320,10 @@
 	struct usb_anchor active;
 	struct usb_anchor delayed;
 
+	int num_out_urbs;
+	int num_in_urbs;
 	/* Input endpoints and buffers for this port */
-	struct urb *in_urbs[N_IN_URB];
+	struct urb *in_urbs[N_IN_URB_HM];
 
 	/* Settings for the port */
 	int rts_state;	/* Handshaking pins (outputs) */
@@ -460,7 +496,7 @@
 	spin_lock_irqsave(&portdata->lock, flags);
 	dev_dbg(&port->dev, "%s - outstanding_urbs: %d\n", __func__,
 		portdata->outstanding_urbs);
-	if (portdata->outstanding_urbs > N_OUT_URB) {
+	if (portdata->outstanding_urbs > portdata->num_out_urbs) {
 		spin_unlock_irqrestore(&portdata->lock, flags);
 		dev_dbg(&port->dev, "%s - write limit hit\n", __func__);
 		return 0;
@@ -665,7 +701,7 @@
 	/* try to give a good number back based on if we have any free urbs at
 	 * this point in time */
 	spin_lock_irqsave(&portdata->lock, flags);
-	if (portdata->outstanding_urbs > N_OUT_URB * 2 / 3) {
+	if (portdata->outstanding_urbs > (portdata->num_out_urbs * 2) / 3) {
 		spin_unlock_irqrestore(&portdata->lock, flags);
 		dev_dbg(&port->dev, "%s - write limit hit\n", __func__);
 		return 0;
@@ -680,7 +716,7 @@
 	int i;
 	struct sierra_port_private *portdata = usb_get_serial_port_data(port);
 
-	for (i = 0; i < ARRAY_SIZE(portdata->in_urbs); i++)
+	for (i = 0; i < portdata->num_in_urbs; i++)
 		usb_kill_urb(portdata->in_urbs[i]);
 
 	usb_kill_urb(port->interrupt_in_urb);
@@ -695,7 +731,7 @@
 	struct sierra_port_private *portdata = usb_get_serial_port_data(port);
 
 	ok_cnt = 0;
-	for (i = 0; i < ARRAY_SIZE(portdata->in_urbs); i++) {
+	for (i = 0; i < portdata->num_in_urbs; i++) {
 		urb = portdata->in_urbs[i];
 		if (!urb)
 			continue;
@@ -791,7 +827,7 @@
 		/* Stop reading urbs */
 		sierra_stop_rx_urbs(port);
 		/* .. and release them */
-		for (i = 0; i < N_IN_URB; i++) {
+		for (i = 0; i < portdata->num_in_urbs; i++) {
 			sierra_release_urb(portdata->in_urbs[i]);
 			portdata->in_urbs[i] = NULL;
 		}
@@ -818,7 +854,7 @@
 
 
 	endpoint = port->bulk_in_endpointAddress;
-	for (i = 0; i < ARRAY_SIZE(portdata->in_urbs); i++) {
+	for (i = 0; i < portdata->num_in_urbs; i++) {
 		urb = sierra_setup_urb(serial, endpoint, USB_DIR_IN, port,
 					IN_BUFLEN, GFP_KERNEL,
 					sierra_indat_callback);
@@ -869,7 +905,9 @@
 {
 	struct usb_serial_port *port;
 	struct sierra_port_private *portdata;
+	struct sierra_iface_info *himemoryp = NULL;
 	int i;
+	u8 ifnum;
 
 	dev_dbg(&serial->dev->dev, "%s\n", __func__);
 
@@ -886,13 +924,40 @@
 		portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
 		if (!portdata) {
 			dev_dbg(&port->dev, "%s: kmalloc for "
-				"sierra_port_private (%d) failed!.\n",
+				"sierra_port_private (%d) failed!\n",
 				__func__, i);
 			return -ENOMEM;
 		}
 		spin_lock_init(&portdata->lock);
 		init_usb_anchor(&portdata->active);
 		init_usb_anchor(&portdata->delayed);
+		ifnum = i;
+		/* Assume low memory requirements */
+		portdata->num_out_urbs = N_OUT_URB;
+		portdata->num_in_urbs  = N_IN_URB;
+
+		/* Determine actual memory requirements */
+		if (serial->num_ports == 1) {
+			/* Get interface number for composite device */
+			ifnum = sierra_calc_interface(serial);
+			himemoryp =
+			    (struct sierra_iface_info *)&typeB_interface_list;
+			if (is_himemory(ifnum, himemoryp)) {
+				portdata->num_out_urbs = N_OUT_URB_HM;
+				portdata->num_in_urbs  = N_IN_URB_HM;
+			}
+		} else {
+			himemoryp =
+			    (struct sierra_iface_info *)&typeA_interface_list;
+			if (is_himemory(i, himemoryp)) {
+				portdata->num_out_urbs = N_OUT_URB_HM;
+				portdata->num_in_urbs  = N_IN_URB_HM;
+			}
+		}
+		dev_dbg(&serial->dev->dev,
+			"Memory usage (urbs) interface #%d, in=%d, out=%d\n",
+			ifnum, portdata->num_in_urbs, portdata->num_out_urbs);
 		/* Set the port private data pointer */
 		usb_set_serial_port_data(port, portdata);
 	}
@@ -940,7 +1005,7 @@
 	struct sierra_intf_private *intfdata;
 	int b;
 
-	if (serial->dev->auto_pm) {
+	if (message.event & PM_EVENT_AUTO) {
 		intfdata = serial->private;
 		spin_lock_irq(&intfdata->susp_lock);
 		b = intfdata->in_flight;
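
The point of sierra's new per-port URB counts is memory: each in-URB carries an IN_BUFLEN buffer, so high-memory interfaces pre-allocate twice the receive buffering of regular ones. A trivial back-of-the-envelope check, with the constants copied from the hunks above:

#include <stdio.h>

#define IN_BUFLEN	4096
#define N_IN_URB_HM	8
#define N_IN_URB	4

int main(void)
{
	printf("hi-memory port: %d bytes of receive buffers\n",
	       N_IN_URB_HM * IN_BUFLEN);	/* 32768 */
	printf("regular port:   %d bytes of receive buffers\n",
	       N_IN_URB * IN_BUFLEN);		/* 16384 */
	return 0;
}
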
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index cfa26d5..e5e6df3 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -73,7 +73,8 @@
 
 static const char* host_info(struct Scsi_Host *host)
 {
-	return "SCSI emulation for USB Mass Storage devices";
+	struct us_data *us = host_to_us(host);
+	return us->scsi_name;
 }
 
 static int slave_alloc (struct scsi_device *sdev)
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 589f6b4..cc313d1 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -666,10 +666,11 @@
 	 * to wait for at least one CHECK_CONDITION to determine
 	 * SANE_SENSE support
 	 */
-	if ((srb->cmnd[0] == ATA_16 || srb->cmnd[0] == ATA_12) &&
+	if (unlikely((srb->cmnd[0] == ATA_16 || srb->cmnd[0] == ATA_12) &&
 	    result == USB_STOR_TRANSPORT_GOOD &&
 	    !(us->fflags & US_FL_SANE_SENSE) &&
-	    !(srb->cmnd[2] & 0x20)) {
+	    !(us->fflags & US_FL_BAD_SENSE) &&
+	    !(srb->cmnd[2] & 0x20))) {
 		US_DEBUGP("-- SAT supported, increasing auto-sense\n");
 		us->fflags |= US_FL_SANE_SENSE;
 	}
@@ -718,6 +719,12 @@
 		if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
 			US_DEBUGP("-- auto-sense aborted\n");
 			srb->result = DID_ABORT << 16;
+
+			/* If SANE_SENSE caused this problem, disable it */
+			if (sense_size != US_SENSE_SIZE) {
+				us->fflags &= ~US_FL_SANE_SENSE;
+				us->fflags |= US_FL_BAD_SENSE;
+			}
 			goto Handle_Errors;
 		}
 
@@ -727,10 +734,11 @@
 		 * (small) sense request. This fixes some USB GSM modems
 		 */
 		if (temp_result == USB_STOR_TRANSPORT_FAILED &&
-		    (us->fflags & US_FL_SANE_SENSE) &&
-		    sense_size != US_SENSE_SIZE) {
+				sense_size != US_SENSE_SIZE) {
 			US_DEBUGP("-- auto-sense failure, retry small sense\n");
 			sense_size = US_SENSE_SIZE;
+			us->fflags &= ~US_FL_SANE_SENSE;
+			us->fflags |= US_FL_BAD_SENSE;
 			goto Retry_Sense;
 		}
 
@@ -754,6 +762,7 @@
 		 */
 		if (srb->sense_buffer[7] > (US_SENSE_SIZE - 8) &&
 		    !(us->fflags & US_FL_SANE_SENSE) &&
+		    !(us->fflags & US_FL_BAD_SENSE) &&
 		    (srb->sense_buffer[0] & 0x7C) == 0x70) {
 			US_DEBUGP("-- SANE_SENSE support enabled\n");
 			us->fflags |= US_FL_SANE_SENSE;
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index d4f034e..64a0a2c 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -818,6 +818,13 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_FIX_CAPACITY ),
 
+/* Reported by Daniel Kukula <daniel.kuku@gmail.com> */
+UNUSUAL_DEV( 0x067b, 0x1063, 0x0100, 0x0100,
+		"Prolific Technology, Inc.",
+		"Prolific Storage Gadget",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_BAD_SENSE ),
+
 /* Reported by Rogerio Brito <rbrito@ime.usp.br> */
 UNUSUAL_DEV( 0x067b, 0x2317, 0x0001, 0x001,
 		"Prolific Technology, Inc.",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 8060b85..5a53d4f 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -45,6 +45,10 @@
  * 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#ifdef CONFIG_USB_STORAGE_DEBUG
+#define DEBUG
+#endif
+
 #include <linux/sched.h>
 #include <linux/errno.h>
 #include <linux/freezer.h>
@@ -228,6 +232,7 @@
 	if (data_len<36) // You lose.
 		return;
 
+	memset(data+8, ' ', 28);
 	if(data[0]&0x20) { /* USB device currently not connected. Return
 			      peripheral qualifier 001b ("...however, the
 			      physical device is not currently connected
@@ -237,15 +242,15 @@
 			      device, it may return zeros or ASCII spaces 
 			      (20h) in those fields until the data is
 			      available from the device."). */
-		memset(data+8,0,28);
 	} else {
 		u16 bcdDevice = le16_to_cpu(us->pusb_dev->descriptor.bcdDevice);
-		memcpy(data+8, us->unusual_dev->vendorName, 
-			strlen(us->unusual_dev->vendorName) > 8 ? 8 :
-			strlen(us->unusual_dev->vendorName));
-		memcpy(data+16, us->unusual_dev->productName, 
-			strlen(us->unusual_dev->productName) > 16 ? 16 :
-			strlen(us->unusual_dev->productName));
+		int n;
+
+		n = strlen(us->unusual_dev->vendorName);
+		memcpy(data+8, us->unusual_dev->vendorName, min(8, n));
+		n = strlen(us->unusual_dev->productName);
+		memcpy(data+16, us->unusual_dev->productName, min(16, n));
+
 		data[32] = 0x30 + ((bcdDevice>>12) & 0x0F);
 		data[33] = 0x30 + ((bcdDevice>>8) & 0x0F);
 		data[34] = 0x30 + ((bcdDevice>>4) & 0x0F);
@@ -459,6 +464,9 @@
 		case 'a':
 			f |= US_FL_SANE_SENSE;
 			break;
+		case 'b':
+			f |= US_FL_BAD_SENSE;
+			break;
 		case 'c':
 			f |= US_FL_FIX_CAPACITY;
 			break;
@@ -808,14 +816,13 @@
 {
 	struct us_data *us = (struct us_data *)__us;
 
-	printk(KERN_DEBUG
-		"usb-storage: device found at %d\n", us->pusb_dev->devnum);
+	dev_dbg(&us->pusb_intf->dev, "device found\n");
 
 	set_freezable();
 	/* Wait for the timeout to expire or for a disconnect */
 	if (delay_use > 0) {
-		printk(KERN_DEBUG "usb-storage: waiting for device "
-				"to settle before scanning\n");
+		dev_dbg(&us->pusb_intf->dev, "waiting for device to settle "
+				"before scanning\n");
 		wait_event_freezable_timeout(us->delay_wait,
 				test_bit(US_FLIDX_DONT_SCAN, &us->dflags),
 				delay_use * HZ);
@@ -832,7 +839,7 @@
 			mutex_unlock(&us->dev_mutex);
 		}
 		scsi_scan_host(us_to_host(us));
-		printk(KERN_DEBUG "usb-storage: device scan complete\n");
+		dev_dbg(&us->pusb_intf->dev, "scan complete\n");
 
 		/* Should we unbind if no devices were detected? */
 	}
@@ -840,6 +847,15 @@
 	complete_and_exit(&us->scanning_done, 0);
 }
 
+static unsigned int usb_stor_sg_tablesize(struct usb_interface *intf)
+{
+	struct usb_device *usb_dev = interface_to_usbdev(intf);
+
+	if (usb_dev->bus->sg_tablesize) {
+		return usb_dev->bus->sg_tablesize;
+	}
+	return SG_ALL;
+}
 
 /* First part of general USB mass-storage probing */
 int usb_stor_probe1(struct us_data **pus,
@@ -868,6 +884,7 @@
 	 * Allow 16-byte CDBs and thus > 2TB
 	 */
 	host->max_cmd_len = 16;
+	host->sg_tablesize = usb_stor_sg_tablesize(intf);
 	*pus = us = host_to_us(host);
 	memset(us, 0, sizeof(struct us_data));
 	mutex_init(&(us->dev_mutex));
@@ -929,6 +946,8 @@
 	result = usb_stor_acquire_resources(us);
 	if (result)
 		goto BadDevice;
+	snprintf(us->scsi_name, sizeof(us->scsi_name), "usb-storage %s",
+					dev_name(&us->pusb_intf->dev));
 	result = scsi_add_host(us_to_host(us), &us->pusb_intf->dev);
 	if (result) {
 		printk(KERN_WARNING USB_STORAGE
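
The new 'b' case extends the per-device flag override string (the usb-storage quirks= module parameter, if memory serves, e.g. something like quirks=067b:1063:b). A standalone sketch of that letter-by-letter parsing; the bit values are placeholders and the entry syntax is an assumption, only the letter-to-flag mapping is taken from the hunk above:

#include <stdio.h>
#include <string.h>

#define US_FL_SANE_SENSE	(1 << 0)	/* placeholder values */
#define US_FL_BAD_SENSE		(1 << 1)
#define US_FL_FIX_CAPACITY	(1 << 2)

int main(void)
{
	const char *quirks = "067b:1063:b";	/* the Prolific gadget above */
	unsigned int vid, pid, f = 0;
	const char *p;

	if (sscanf(quirks, "%x:%x", &vid, &pid) != 2)
		return 1;
	p = strrchr(quirks, ':') + 1;	/* flag letters follow the last ':' */
	for (; *p; p++) {
		switch (*p) {
		case 'a':
			f |= US_FL_SANE_SENSE;
			break;
		case 'b':
			f |= US_FL_BAD_SENSE;
			break;
		case 'c':
			f |= US_FL_FIX_CAPACITY;
			break;
		}
	}
	printf("%04x:%04x -> flags 0x%x\n", vid, pid, f);
	return 0;
}
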
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index 2609efb..6971713 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -132,6 +132,7 @@
 	/* SCSI interfaces */
 	struct scsi_cmnd	*srb;		 /* current srb		*/
 	unsigned int		tag;		 /* current dCBWTag	*/
+	char			scsi_name[32];	 /* scsi_host name	*/
 
 	/* control and bulk communications data */
 	struct urb		*current_urb;	 /* USB requests	 */
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index b62f2bc..b1e579c 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -358,7 +358,7 @@
 		rv = skel_do_read_io(dev, count);
 		if (rv < 0)
 			goto exit;
-		else if (!file->f_flags & O_NONBLOCK)
+		else if (!(file->f_flags & O_NONBLOCK))
 			goto retry;
 		rv = -EAGAIN;
 	}
@@ -411,7 +411,7 @@
 	 * limit the number of URBs in flight to stop a user from using up all
 	 * RAM
 	 */
-	if (!file->f_flags & O_NONBLOCK) {
+	if (!(file->f_flags & O_NONBLOCK)) {
 		if (down_interruptible(&dev->limit_sem)) {
 			retval = -ERESTARTSYS;
 			goto exit;
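
Both usb-skeleton hunks fix the same C precedence slip: ! binds tighter than &, so !file->f_flags & O_NONBLOCK evaluates as (!file->f_flags) & O_NONBLOCK, which is almost always 0 since f_flags is rarely zero, and the blocking path was effectively never taken. A two-line demonstration:

/* Minimal demonstration of the precedence bug fixed above. */
#include <stdio.h>

#define O_NONBLOCK 04000	/* typical Linux value, for illustration */

int main(void)
{
	int flags = 0;	/* a blocking fd: O_NONBLOCK is clear */

	/* buggy: (!flags) & O_NONBLOCK == 1 & 04000 == 0, so the
	 * "not non-blocking" branch is never taken */
	printf("buggy:   %d\n", !flags & O_NONBLOCK);
	/* fixed: !(flags & O_NONBLOCK) == 1 as intended */
	printf("correct: %d\n", !(flags & O_NONBLOCK));
	return 0;
}
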
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index 4ac4300..dced419 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -119,10 +119,12 @@
 	urb = usb_alloc_urb(0, GFP_KERNEL);
 	if (urb == NULL)
 		goto err;
+	wusb_dev->set_gtk_urb = urb;
 
-	req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
+	req = kmalloc(sizeof(*req), GFP_KERNEL);
 	if (req == NULL)
 		goto err;
+	wusb_dev->set_gtk_req = req;
 
 	req->bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE;
 	req->bRequest = USB_REQ_SET_DESCRIPTOR;
@@ -130,9 +132,6 @@
 	req->wIndex = 0;
 	req->wLength = cpu_to_le16(wusbhc->gtk.descr.bLength);
 
-	wusb_dev->set_gtk_urb = urb;
-	wusb_dev->set_gtk_req = req;
-
 	return wusb_dev;
 err:
 	wusb_dev_free(wusb_dev);
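
The reordering above is subtle but real: by storing the urb and request into wusb_dev as soon as each allocation succeeds, the common error path can hand everything to wusb_dev_free() instead of leaking whatever was allocated before the failure. A generic sketch of the pattern, using userspace stand-ins rather than the wusb types:

/* Record each allocation in the owning object immediately, so one
 * error path frees exactly what was allocated.
 */
#include <stdlib.h>

struct thing {
	void *a;
	void *b;
};

static void thing_free(struct thing *t)
{
	free(t->b);	/* free(NULL) is a no-op, so partial init is fine */
	free(t->a);
	free(t);
}

static struct thing *thing_new(void)
{
	struct thing *t = calloc(1, sizeof(*t));

	if (!t)
		return NULL;
	t->a = malloc(16);
	if (!t->a)
		goto err;	/* t->a is recorded (still NULL here) */
	t->b = malloc(16);
	if (!t->b)
		goto err;	/* t->a will be freed by thing_free() */
	return t;
err:
	thing_free(t);
	return NULL;
}
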
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
index 4516c36..edcd2d7 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/usb/wusbcore/security.c
@@ -205,15 +205,15 @@
 	const void *itr, *top;
 	char buf[64];
 
-	secd = kmalloc(sizeof(struct usb_security_descriptor), GFP_KERNEL);
+	secd = kmalloc(sizeof(*secd), GFP_KERNEL);
 	if (secd == NULL) {
 		result = -ENOMEM;
 		goto out;
 	}
 
 	result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
-				    0, secd, sizeof(struct usb_security_descriptor));
-	if (result < sizeof(secd)) {
+				    0, secd, sizeof(*secd));
+	if (result < sizeof(*secd)) {
 		dev_err(dev, "Can't read security descriptor or "
 			"not enough data: %d\n", result);
 		goto out;
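
The second security.c hunk fixes more than style: sizeof(secd) is the size of a pointer (4 or 8 bytes), so the old check would accept a descriptor read as short as the pointer itself, while sizeof(*secd) measures the struct. A minimal demonstration; the struct here is a stand-in for the real usb_security_descriptor in the kernel's USB ch9 headers:

/* Demonstrates the sizeof(ptr) vs sizeof(*ptr) pitfall fixed above. */
#include <stdio.h>

struct security_descriptor_example {
	unsigned char  bLength;
	unsigned char  bDescriptorType;
	unsigned short wTotalLength;
	unsigned char  bNumEncryptionTypes;
};

int main(void)
{
	struct security_descriptor_example *secd = NULL;

	printf("sizeof(secd)  = %zu (the pointer!)\n", sizeof(secd));
	printf("sizeof(*secd) = %zu (the descriptor)\n", sizeof(*secd));
	return 0;
}
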
diff --git a/drivers/usb/wusbcore/wusbhc.c b/drivers/usb/wusbcore/wusbhc.c
index ee6256f..eab86e4 100644
--- a/drivers/usb/wusbcore/wusbhc.c
+++ b/drivers/usb/wusbcore/wusbhc.c
@@ -147,10 +147,40 @@
 }
 static DEVICE_ATTR(wusb_chid, 0644, wusb_chid_show, wusb_chid_store);
 
+
+static ssize_t wusb_phy_rate_show(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+
+	return sprintf(buf, "%d\n", wusbhc->phy_rate);
+}
+
+static ssize_t wusb_phy_rate_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t size)
+{
+	struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+	uint8_t phy_rate;
+	ssize_t result;
+
+	result = sscanf(buf, "%hhu", &phy_rate);
+	if (result != 1)
+		return -EINVAL;
+	if (phy_rate >= UWB_PHY_RATE_INVALID)
+		return -EINVAL;
+
+	wusbhc->phy_rate = phy_rate;
+	return size;
+}
+static DEVICE_ATTR(wusb_phy_rate, 0644, wusb_phy_rate_show, wusb_phy_rate_store);
+
 /* Group all the WUSBHC attributes */
 static struct attribute *wusbhc_attrs[] = {
 		&dev_attr_wusb_trust_timeout.attr,
 		&dev_attr_wusb_chid.attr,
+		&dev_attr_wusb_phy_rate.attr,
 		NULL,
 };
 
@@ -177,6 +207,8 @@
 	int result = 0;
 
 	wusbhc->trust_timeout = WUSB_TRUST_TIMEOUT_MS;
+	wusbhc->phy_rate = UWB_PHY_RATE_INVALID - 1;
+
 	mutex_init(&wusbhc->mutex);
 	result = wusbhc_mmcie_create(wusbhc);
 	if (result < 0)
diff --git a/drivers/usb/wusbcore/wusbhc.h b/drivers/usb/wusbcore/wusbhc.h
index 797c245..fd2fd4e 100644
--- a/drivers/usb/wusbcore/wusbhc.h
+++ b/drivers/usb/wusbcore/wusbhc.h
@@ -253,6 +253,7 @@
 
 	unsigned trust_timeout;			/* in jiffies */
 	struct wusb_ckhdid chid;
+	uint8_t phy_rate;
 	struct wuie_host_info *wuie_host_info;
 
 	struct mutex mutex;			/* locks everything else */
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index e9f193e..bb5fbed 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2165,6 +2165,7 @@
 	  a bridge adapter.
 
 source "drivers/video/omap/Kconfig"
+source "drivers/video/omap2/Kconfig"
 
 source "drivers/video/backlight/Kconfig"
 source "drivers/video/display/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 80232e1..0f8da33 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -124,6 +124,7 @@
 obj-$(CONFIG_FB_XILINX)           += xilinxfb.o
 obj-$(CONFIG_FB_SH_MOBILE_LCDC)	  += sh_mobile_lcdcfb.o
 obj-$(CONFIG_FB_OMAP)             += omap/
+obj-y                             += omap2/
 obj-$(CONFIG_XEN_FBDEV_FRONTEND)  += xen-fbfront.o
 obj-$(CONFIG_FB_CARMINE)          += carminefb.o
 obj-$(CONFIG_FB_MB862XX)	  += mb862xx/
diff --git a/drivers/video/omap/Kconfig b/drivers/video/omap/Kconfig
index 551e3e9..455c605 100644
--- a/drivers/video/omap/Kconfig
+++ b/drivers/video/omap/Kconfig
@@ -1,6 +1,6 @@
 config FB_OMAP
 	tristate "OMAP frame buffer support (EXPERIMENTAL)"
-	depends on FB && ARCH_OMAP
+	depends on FB && ARCH_OMAP && (OMAP2_DSS = "n")
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
@@ -72,7 +73,7 @@
 
 config FB_OMAP_BOOTLOADER_INIT
 	bool "Check bootloader initialization"
-	depends on FB_OMAP
+	depends on FB_OMAP || FB_OMAP2
 	help
 	  Say Y here if you want to enable checking if the bootloader has
 	  already initialized the display controller. In this case the
diff --git a/drivers/video/omap/blizzard.c b/drivers/video/omap/blizzard.c
index f5d75f2..2ffb34a 100644
--- a/drivers/video/omap/blizzard.c
+++ b/drivers/video/omap/blizzard.c
@@ -27,9 +27,9 @@
 #include <linux/clk.h>
 
 #include <plat/dma.h>
-#include <plat/omapfb.h>
 #include <plat/blizzard.h>
 
+#include "omapfb.h"
 #include "dispc.h"
 
 #define MODULE_NAME				"blizzard"
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
index 7c833db..c7c6455 100644
--- a/drivers/video/omap/dispc.c
+++ b/drivers/video/omap/dispc.c
@@ -24,11 +24,12 @@
 #include <linux/vmalloc.h>
 #include <linux/clk.h>
 #include <linux/io.h>
+#include <linux/platform_device.h>
 
 #include <plat/sram.h>
-#include <plat/omapfb.h>
 #include <plat/board.h>
 
+#include "omapfb.h"
 #include "dispc.h"
 
 #define MODULE_NAME			"dispc"
@@ -188,6 +189,11 @@
 	struct omapfb_color_key	color_key;
 } dispc;
 
+static struct platform_device omapdss_device = {
+	.name		= "omapdss",
+	.id		= -1,
+};
+
 static void enable_lcd_clocks(int enable);
 
 static void inline dispc_write_reg(int idx, u32 val)
@@ -914,20 +920,20 @@
 
 static int get_dss_clocks(void)
 {
-	dispc.dss_ick = clk_get(dispc.fbdev->dev, "ick");
+	dispc.dss_ick = clk_get(&omapdss_device.dev, "ick");
 	if (IS_ERR(dispc.dss_ick)) {
 		dev_err(dispc.fbdev->dev, "can't get ick\n");
 		return PTR_ERR(dispc.dss_ick);
 	}
 
-	dispc.dss1_fck = clk_get(dispc.fbdev->dev, "dss1_fck");
+	dispc.dss1_fck = clk_get(&omapdss_device.dev, "dss1_fck");
 	if (IS_ERR(dispc.dss1_fck)) {
 		dev_err(dispc.fbdev->dev, "can't get dss1_fck\n");
 		clk_put(dispc.dss_ick);
 		return PTR_ERR(dispc.dss1_fck);
 	}
 
-	dispc.dss_54m_fck = clk_get(dispc.fbdev->dev, "tv_fck");
+	dispc.dss_54m_fck = clk_get(&omapdss_device.dev, "tv_fck");
 	if (IS_ERR(dispc.dss_54m_fck)) {
 		dev_err(dispc.fbdev->dev, "can't get tv_fck\n");
 		clk_put(dispc.dss_ick);
@@ -1379,6 +1385,12 @@
 	int skip_init = 0;
 	int i;
 
+	r = platform_device_register(&omapdss_device);
+	if (r) {
+		dev_err(fbdev->dev, "can't register omapdss device\n");
+		return r;
+	}
+
 	memset(&dispc, 0, sizeof(dispc));
 
 	dispc.base = ioremap(DISPC_BASE, SZ_1K);
@@ -1522,6 +1534,7 @@
 	free_irq(INT_24XX_DSS_IRQ, dispc.fbdev);
 	put_dss_clocks();
 	iounmap(dispc.base);
+	platform_device_unregister(&omapdss_device);
 }
 
 const struct lcd_ctrl omap2_int_ctrl = {
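
The dummy "omapdss" platform device registered in this file exists purely so that clk_get() can resolve the DSS clocks: the clock framework matches clocks by device, and with the DSS2 merge the ick/fck bindings hang off an "omapdss" device rather than the omapfb one. Reduced to its essentials (an illustrative sketch, error handling elided):

	#include <linux/platform_device.h>
	#include <linux/clk.h>
	#include <linux/err.h>

	static struct platform_device example_dss_device = {
		.name	= "omapdss",
		.id	= -1,
	};

	static struct clk *example_get_ick(void)
	{
		/* resolves "ick" against the omapdss device, which is
		 * where the DSS clock bindings now live */
		return clk_get(&example_dss_device.dev, "ick");
	}
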
diff --git a/drivers/video/omap/hwa742.c b/drivers/video/omap/hwa742.c
index 17a975e..0016f77 100644
--- a/drivers/video/omap/hwa742.c
+++ b/drivers/video/omap/hwa742.c
@@ -25,10 +25,11 @@
 #include <linux/fb.h>
 #include <linux/delay.h>
 #include <linux/clk.h>
+#include <linux/interrupt.h>
 
 #include <plat/dma.h>
-#include <plat/omapfb.h>
 #include <plat/hwa742.h>
+#include "omapfb.h"
 
 #define HWA742_REV_CODE_REG       0x0
 #define HWA742_CONFIG_REG         0x2
diff --git a/drivers/video/omap/lcd_2430sdp.c b/drivers/video/omap/lcd_2430sdp.c
index fea7fee..760645d 100644
--- a/drivers/video/omap/lcd_2430sdp.c
+++ b/drivers/video/omap/lcd_2430sdp.c
@@ -28,9 +28,10 @@
 #include <linux/i2c/twl4030.h>
 
 #include <plat/mux.h>
-#include <plat/omapfb.h>
 #include <asm/mach-types.h>
 
+#include "omapfb.h"
+
 #define SDP2430_LCD_PANEL_BACKLIGHT_GPIO	91
 #define SDP2430_LCD_PANEL_ENABLE_GPIO		154
 #define SDP3430_LCD_PANEL_BACKLIGHT_GPIO	24
diff --git a/drivers/video/omap/lcd_ams_delta.c b/drivers/video/omap/lcd_ams_delta.c
index b3973eb..567db6a 100644
--- a/drivers/video/omap/lcd_ams_delta.c
+++ b/drivers/video/omap/lcd_ams_delta.c
@@ -27,7 +27,8 @@
 
 #include <plat/board-ams-delta.h>
 #include <mach/hardware.h>
-#include <plat/omapfb.h>
+
+#include "omapfb.h"
 
 #define AMS_DELTA_DEFAULT_CONTRAST	112
 
diff --git a/drivers/video/omap/lcd_apollon.c b/drivers/video/omap/lcd_apollon.c
index 4c5cefc..2be94eb 100644
--- a/drivers/video/omap/lcd_apollon.c
+++ b/drivers/video/omap/lcd_apollon.c
@@ -26,7 +26,8 @@
 
 #include <mach/gpio.h>
 #include <plat/mux.h>
-#include <plat/omapfb.h>
+
+#include "omapfb.h"
 
 /* #define USE_35INCH_LCD 1 */
 
diff --git a/drivers/video/omap/lcd_h3.c b/drivers/video/omap/lcd_h3.c
index 240b4fb..8df6887 100644
--- a/drivers/video/omap/lcd_h3.c
+++ b/drivers/video/omap/lcd_h3.c
@@ -24,7 +24,7 @@
 #include <linux/i2c/tps65010.h>
 
 #include <mach/gpio.h>
-#include <plat/omapfb.h>
+#include "omapfb.h"
 
 #define MODULE_NAME	"omapfb-lcd_h3"
 
diff --git a/drivers/video/omap/lcd_h4.c b/drivers/video/omap/lcd_h4.c
index 720625d..03a06a9 100644
--- a/drivers/video/omap/lcd_h4.c
+++ b/drivers/video/omap/lcd_h4.c
@@ -22,7 +22,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 
-#include <plat/omapfb.h>
+#include "omapfb.h"
 
 static int h4_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
 {
diff --git a/drivers/video/omap/lcd_htcherald.c b/drivers/video/omap/lcd_htcherald.c
index 2e0c81e..a9007c5 100644
--- a/drivers/video/omap/lcd_htcherald.c
+++ b/drivers/video/omap/lcd_htcherald.c
@@ -29,7 +29,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 
-#include <plat/omapfb.h>
+#include "omapfb.h"
 
 static int htcherald_panel_init(struct lcd_panel *panel,
 					struct omapfb_device *fbdev)
diff --git a/drivers/video/omap/lcd_inn1510.c b/drivers/video/omap/lcd_inn1510.c
index aafe9b4..3271f16 100644
--- a/drivers/video/omap/lcd_inn1510.c
+++ b/drivers/video/omap/lcd_inn1510.c
@@ -24,7 +24,7 @@
 #include <linux/io.h>
 
 #include <plat/fpga.h>
-#include <plat/omapfb.h>
+#include "omapfb.h"
 
 static int innovator1510_panel_init(struct lcd_panel *panel,
 				    struct omapfb_device *fbdev)
diff --git a/drivers/video/omap/lcd_inn1610.c b/drivers/video/omap/lcd_inn1610.c
index 0de3382..9fff86f 100644
--- a/drivers/video/omap/lcd_inn1610.c
+++ b/drivers/video/omap/lcd_inn1610.c
@@ -23,7 +23,7 @@
 #include <linux/platform_device.h>
 
 #include <mach/gpio.h>
-#include <plat/omapfb.h>
+#include "omapfb.h"
 
 #define MODULE_NAME	"omapfb-lcd_h3"
 
diff --git a/drivers/video/omap/lcd_ldp.c b/drivers/video/omap/lcd_ldp.c
index 6a260df..5bb7f6f 100644
--- a/drivers/video/omap/lcd_ldp.c
+++ b/drivers/video/omap/lcd_ldp.c
@@ -28,9 +28,10 @@
 
 #include <mach/gpio.h>
 #include <plat/mux.h>
-#include <plat/omapfb.h>
 #include <asm/mach-types.h>
 
+#include "omapfb.h"
+
 #define LCD_PANEL_BACKLIGHT_GPIO	(15 + OMAP_MAX_GPIO_LINES)
 #define LCD_PANEL_ENABLE_GPIO		(7 + OMAP_MAX_GPIO_LINES)
 
diff --git a/drivers/video/omap/lcd_mipid.c b/drivers/video/omap/lcd_mipid.c
index 8f3e2b4..abe1c76 100644
--- a/drivers/video/omap/lcd_mipid.c
+++ b/drivers/video/omap/lcd_mipid.c
@@ -23,9 +23,10 @@
 #include <linux/workqueue.h>
 #include <linux/spi/spi.h>
 
-#include <plat/omapfb.h>
 #include <plat/lcd_mipid.h>
 
+#include "omapfb.h"
+
 #define MIPID_MODULE_NAME		"lcd_mipid"
 
 #define MIPID_CMD_READ_DISP_ID		0x04
diff --git a/drivers/video/omap/lcd_omap2evm.c b/drivers/video/omap/lcd_omap2evm.c
index e1a38ab..006c2fe 100644
--- a/drivers/video/omap/lcd_omap2evm.c
+++ b/drivers/video/omap/lcd_omap2evm.c
@@ -27,9 +27,10 @@
 #include <linux/i2c/twl4030.h>
 
 #include <plat/mux.h>
-#include <plat/omapfb.h>
 #include <asm/mach-types.h>
 
+#include "omapfb.h"
+
 #define LCD_PANEL_ENABLE_GPIO	154
 #define LCD_PANEL_LR		128
 #define LCD_PANEL_UD		129
diff --git a/drivers/video/omap/lcd_omap3beagle.c b/drivers/video/omap/lcd_omap3beagle.c
index ccec084..fc503d8 100644
--- a/drivers/video/omap/lcd_omap3beagle.c
+++ b/drivers/video/omap/lcd_omap3beagle.c
@@ -26,9 +26,10 @@
 #include <linux/i2c/twl4030.h>
 
 #include <plat/mux.h>
-#include <plat/omapfb.h>
 #include <asm/mach-types.h>
 
+#include "omapfb.h"
+
 #define LCD_PANEL_ENABLE_GPIO       170
 
 static int omap3beagle_panel_init(struct lcd_panel *panel,
diff --git a/drivers/video/omap/lcd_omap3evm.c b/drivers/video/omap/lcd_omap3evm.c
index 556eb31..ae2edc4 100644
--- a/drivers/video/omap/lcd_omap3evm.c
+++ b/drivers/video/omap/lcd_omap3evm.c
@@ -26,9 +26,10 @@
 #include <linux/i2c/twl4030.h>
 
 #include <plat/mux.h>
-#include <plat/omapfb.h>
 #include <asm/mach-types.h>
 
+#include "omapfb.h"
+
 #define LCD_PANEL_ENABLE_GPIO       153
 #define LCD_PANEL_LR                2
 #define LCD_PANEL_UD                3
diff --git a/drivers/video/omap/lcd_osk.c b/drivers/video/omap/lcd_osk.c
index bb21d7d..b87e8b8 100644
--- a/drivers/video/omap/lcd_osk.c
+++ b/drivers/video/omap/lcd_osk.c
@@ -25,7 +25,7 @@
 
 #include <mach/gpio.h>
 #include <plat/mux.h>
-#include <plat/omapfb.h>
+#include "omapfb.h"
 
 static int osk_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
 {
diff --git a/drivers/video/omap/lcd_overo.c b/drivers/video/omap/lcd_overo.c
index b0f86e5..56ee192 100644
--- a/drivers/video/omap/lcd_overo.c
+++ b/drivers/video/omap/lcd_overo.c
@@ -25,9 +25,10 @@
 
 #include <mach/gpio.h>
 #include <plat/mux.h>
-#include <plat/omapfb.h>
 #include <asm/mach-types.h>
 
+#include "omapfb.h"
+
 #define LCD_ENABLE       144
 
 static int overo_panel_init(struct lcd_panel *panel,
diff --git a/drivers/video/omap/lcd_palmte.c b/drivers/video/omap/lcd_palmte.c
index d302896..4cb3017 100644
--- a/drivers/video/omap/lcd_palmte.c
+++ b/drivers/video/omap/lcd_palmte.c
@@ -24,7 +24,7 @@
 #include <linux/io.h>
 
 #include <plat/fpga.h>
-#include <plat/omapfb.h>
+#include "omapfb.h"
 
 static int palmte_panel_init(struct lcd_panel *panel,
 				struct omapfb_device *fbdev)
diff --git a/drivers/video/omap/lcd_palmtt.c b/drivers/video/omap/lcd_palmtt.c
index 557424f..ff0e6d7 100644
--- a/drivers/video/omap/lcd_palmtt.c
+++ b/drivers/video/omap/lcd_palmtt.c
@@ -30,7 +30,7 @@
 #include <linux/io.h>
 
 #include <mach/gpio.h>
-#include <plat/omapfb.h>
+#include "omapfb.h"
 
 static int palmtt_panel_init(struct lcd_panel *panel,
 	struct omapfb_device *fbdev)
diff --git a/drivers/video/omap/lcd_palmz71.c b/drivers/video/omap/lcd_palmz71.c
index 5f4b5b2..2334e56 100644
--- a/drivers/video/omap/lcd_palmz71.c
+++ b/drivers/video/omap/lcd_palmz71.c
@@ -24,7 +24,7 @@
 #include <linux/platform_device.h>
 #include <linux/io.h>
 
-#include <plat/omapfb.h>
+#include "omapfb.h"
 
 static int palmz71_panel_init(struct lcd_panel *panel,
 			      struct omapfb_device *fbdev)
diff --git a/drivers/video/omap/lcdc.c b/drivers/video/omap/lcdc.c
index 5f32caf..b831e1d 100644
--- a/drivers/video/omap/lcdc.c
+++ b/drivers/video/omap/lcdc.c
@@ -30,10 +30,11 @@
 #include <linux/clk.h>
 
 #include <plat/dma.h>
-#include <plat/omapfb.h>
 
 #include <asm/mach-types.h>
 
+#include "omapfb.h"
+
 #include "lcdc.h"
 
 #define MODULE_NAME			"lcdc"
diff --git a/drivers/video/omap/omapfb.h b/drivers/video/omap/omapfb.h
new file mode 100644
index 0000000..46e4714
--- /dev/null
+++ b/drivers/video/omap/omapfb.h
@@ -0,0 +1,227 @@
+/*
+ * File: drivers/video/omap/omapfb.h
+ *
+ * Framebuffer driver for TI OMAP boards
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#ifndef __OMAPFB_H
+#define __OMAPFB_H
+
+#include <linux/fb.h>
+#include <linux/mutex.h>
+#include <linux/omapfb.h>
+
+#define OMAPFB_EVENT_READY	1
+#define OMAPFB_EVENT_DISABLED	2
+
+#define OMAP_LCDC_INV_VSYNC             0x0001
+#define OMAP_LCDC_INV_HSYNC             0x0002
+#define OMAP_LCDC_INV_PIX_CLOCK         0x0004
+#define OMAP_LCDC_INV_OUTPUT_EN         0x0008
+#define OMAP_LCDC_HSVS_RISING_EDGE      0x0010
+#define OMAP_LCDC_HSVS_OPPOSITE         0x0020
+
+#define OMAP_LCDC_SIGNAL_MASK		0x003f
+
+#define OMAP_LCDC_PANEL_TFT		0x0100
+
+#define OMAPFB_PLANE_XRES_MIN		8
+#define OMAPFB_PLANE_YRES_MIN		8
+
+struct omapfb_device;
+
+struct lcd_panel {
+	const char	*name;
+	int		config;		/* TFT/STN, signal inversion */
+	int		bpp;		/* Pixel format in fb mem */
+	int		data_lines;	/* Lines on LCD HW interface */
+
+	int		x_res, y_res;
+	int		pixel_clock;	/* In kHz */
+	int		hsw;		/* Horizontal synchronization
+					   pulse width */
+	int		hfp;		/* Horizontal front porch */
+	int		hbp;		/* Horizontal back porch */
+	int		vsw;		/* Vertical synchronization
+					   pulse width */
+	int		vfp;		/* Vertical front porch */
+	int		vbp;		/* Vertical back porch */
+	int		acb;		/* ac-bias pin frequency */
+	int		pcd;		/* pixel clock divider.
+					   Obsolete; use pixel_clock instead */
+
+	int		(*init)		(struct lcd_panel *panel,
+					 struct omapfb_device *fbdev);
+	void		(*cleanup)	(struct lcd_panel *panel);
+	int		(*enable)	(struct lcd_panel *panel);
+	void		(*disable)	(struct lcd_panel *panel);
+	unsigned long	(*get_caps)	(struct lcd_panel *panel);
+	int		(*set_bklight_level)(struct lcd_panel *panel,
+					     unsigned int level);
+	unsigned int	(*get_bklight_level)(struct lcd_panel *panel);
+	unsigned int	(*get_bklight_max)  (struct lcd_panel *panel);
+	int		(*run_test)	(struct lcd_panel *panel, int test_num);
+};
+
+struct extif_timings {
+	int cs_on_time;
+	int cs_off_time;
+	int we_on_time;
+	int we_off_time;
+	int re_on_time;
+	int re_off_time;
+	int we_cycle_time;
+	int re_cycle_time;
+	int cs_pulse_width;
+	int access_time;
+
+	int clk_div;
+
+	u32 tim[5];		/* set by extif->convert_timings */
+
+	int converted;
+};
+
+struct lcd_ctrl_extif {
+	int  (*init)		(struct omapfb_device *fbdev);
+	void (*cleanup)		(void);
+	void (*get_clk_info)	(u32 *clk_period, u32 *max_clk_div);
+	unsigned long (*get_max_tx_rate)(void);
+	int  (*convert_timings)	(struct extif_timings *timings);
+	void (*set_timings)	(const struct extif_timings *timings);
+	void (*set_bits_per_cycle)(int bpc);
+	void (*write_command)	(const void *buf, unsigned int len);
+	void (*read_data)	(void *buf, unsigned int len);
+	void (*write_data)	(const void *buf, unsigned int len);
+	void (*transfer_area)	(int width, int height,
+				 void (callback)(void *data), void *data);
+	int  (*setup_tearsync)	(unsigned pin_cnt,
+				 unsigned hs_pulse_time, unsigned vs_pulse_time,
+				 int hs_pol_inv, int vs_pol_inv, int div);
+	int  (*enable_tearsync) (int enable, unsigned line);
+
+	unsigned long		max_transmit_size;
+};
+
+struct omapfb_notifier_block {
+	struct notifier_block	nb;
+	void			*data;
+	int			plane_idx;
+};
+
+typedef int (*omapfb_notifier_callback_t)(struct notifier_block *,
+					  unsigned long event,
+					  void *fbi);
+
+struct lcd_ctrl {
+	const char	*name;
+	void		*data;
+
+	int		(*init)		  (struct omapfb_device *fbdev,
+					   int ext_mode,
+					   struct omapfb_mem_desc *req_md);
+	void		(*cleanup)	  (void);
+	void		(*bind_client)	  (struct omapfb_notifier_block *nb);
+	void		(*get_caps)	  (int plane, struct omapfb_caps *caps);
+	int		(*set_update_mode)(enum omapfb_update_mode mode);
+	enum omapfb_update_mode (*get_update_mode)(void);
+	int		(*setup_plane)	  (int plane, int channel_out,
+					   unsigned long offset,
+					   int screen_width,
+					   int pos_x, int pos_y, int width,
+					   int height, int color_mode);
+	int		(*set_rotate)	  (int angle);
+	int		(*setup_mem)	  (int plane, size_t size,
+					   int mem_type, unsigned long *paddr);
+	int		(*mmap)		  (struct fb_info *info,
+					   struct vm_area_struct *vma);
+	int		(*set_scale)	  (int plane,
+					   int orig_width, int orig_height,
+					   int out_width, int out_height);
+	int		(*enable_plane)	  (int plane, int enable);
+	int		(*update_window)  (struct fb_info *fbi,
+					   struct omapfb_update_window *win,
+					   void (*callback)(void *),
+					   void *callback_data);
+	void		(*sync)		  (void);
+	void		(*suspend)	  (void);
+	void		(*resume)	  (void);
+	int		(*run_test)	  (int test_num);
+	int		(*setcolreg)	  (u_int regno, u16 red, u16 green,
+					   u16 blue, u16 transp,
+					   int update_hw_mem);
+	int		(*set_color_key)  (struct omapfb_color_key *ck);
+	int		(*get_color_key)  (struct omapfb_color_key *ck);
+};
+
+enum omapfb_state {
+	OMAPFB_DISABLED		= 0,
+	OMAPFB_SUSPENDED	= 99,
+	OMAPFB_ACTIVE		= 100
+};
+
+struct omapfb_plane_struct {
+	int				idx;
+	struct omapfb_plane_info	info;
+	enum omapfb_color_format	color_mode;
+	struct omapfb_device		*fbdev;
+};
+
+struct omapfb_device {
+	int			state;
+	int                     ext_lcdc;		/* Using external
+							   LCD controller */
+	struct mutex		rqueue_mutex;
+
+	int			palette_size;
+	u32			pseudo_palette[17];
+
+	struct lcd_panel	*panel;			/* LCD panel */
+	const struct lcd_ctrl	*ctrl;			/* LCD controller */
+	const struct lcd_ctrl	*int_ctrl;		/* internal LCD ctrl */
+	struct lcd_ctrl_extif	*ext_if;		/* LCD ctrl external
+							   interface */
+	struct device		*dev;
+	struct fb_var_screeninfo	new_var;	/* for mode changes */
+
+	struct omapfb_mem_desc		mem_desc;
+	struct fb_info			*fb_info[OMAPFB_PLANE_NUM];
+};
+
+#ifdef CONFIG_ARCH_OMAP1
+extern struct lcd_ctrl omap1_lcd_ctrl;
+#else
+extern struct lcd_ctrl omap2_disp_ctrl;
+#endif
+
+extern void omapfb_register_panel(struct lcd_panel *panel);
+extern void omapfb_write_first_pixel(struct omapfb_device *fbdev, u16 pixval);
+extern void omapfb_notify_clients(struct omapfb_device *fbdev,
+				  unsigned long event);
+extern int  omapfb_register_client(struct omapfb_notifier_block *nb,
+				   omapfb_notifier_callback_t callback,
+				   void *callback_data);
+extern int  omapfb_unregister_client(struct omapfb_notifier_block *nb);
+extern int  omapfb_update_window_async(struct fb_info *fbi,
+				       struct omapfb_update_window *win,
+				       void (*callback)(void *),
+				       void *callback_data);
+
+#endif /* __OMAPFB_H */
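
With the header now private to drivers/video/omap, a panel driver includes "omapfb.h" and hands the core a filled-in struct lcd_panel, exactly as the lcd_*.c conversions above do. A minimal hypothetical panel, sketched from the fields declared here (every value and name below is made up for illustration):

	#include "omapfb.h"

	static int example_panel_init(struct lcd_panel *panel,
				      struct omapfb_device *fbdev)
	{
		return 0;
	}

	static void example_panel_cleanup(struct lcd_panel *panel)
	{
	}

	static int example_panel_enable(struct lcd_panel *panel)
	{
		return 0;
	}

	static void example_panel_disable(struct lcd_panel *panel)
	{
	}

	static unsigned long example_panel_get_caps(struct lcd_panel *panel)
	{
		return 0;
	}

	static struct lcd_panel example_panel = {
		.name		= "example",
		.config		= OMAP_LCDC_PANEL_TFT,
		.bpp		= 16,
		.data_lines	= 16,
		.x_res		= 320,
		.y_res		= 240,
		.pixel_clock	= 6000,		/* kHz */
		.hsw		= 12,
		.hfp		= 12,
		.hbp		= 12,
		.vsw		= 1,
		.vfp		= 1,
		.vbp		= 1,
		.init		= example_panel_init,
		.cleanup	= example_panel_cleanup,
		.enable		= example_panel_enable,
		.disable	= example_panel_disable,
		.get_caps	= example_panel_get_caps,
	};

	/* a board file or module init would then call: */
	/* omapfb_register_panel(&example_panel); */
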
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index f900a43..c7f59a5 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -29,8 +29,8 @@
 #include <linux/uaccess.h>
 
 #include <plat/dma.h>
-#include <plat/omapfb.h>
 
+#include "omapfb.h"
 #include "lcdc.h"
 #include "dispc.h"
 
diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c
index c90fa39..fed7b1b 100644
--- a/drivers/video/omap/rfbi.c
+++ b/drivers/video/omap/rfbi.c
@@ -27,8 +27,7 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 
-#include <plat/omapfb.h>
-
+#include "omapfb.h"
 #include "dispc.h"
 
 /* To work around an RFBI transfer rate limitation */
diff --git a/drivers/video/omap/sossi.c b/drivers/video/omap/sossi.c
index 79dc84f..8fb7c70 100644
--- a/drivers/video/omap/sossi.c
+++ b/drivers/video/omap/sossi.c
@@ -23,10 +23,11 @@
 #include <linux/clk.h>
 #include <linux/irq.h>
 #include <linux/io.h>
+#include <linux/interrupt.h>
 
 #include <plat/dma.h>
-#include <plat/omapfb.h>
 
+#include "omapfb.h"
 #include "lcdc.h"
 
 #define MODULE_NAME		"omapfb-sossi"
diff --git a/drivers/video/omap2/Kconfig b/drivers/video/omap2/Kconfig
new file mode 100644
index 0000000..d877c36
--- /dev/null
+++ b/drivers/video/omap2/Kconfig
@@ -0,0 +1,9 @@
+config OMAP2_VRAM
+	bool
+
+config OMAP2_VRFB
+	bool
+
+source "drivers/video/omap2/dss/Kconfig"
+source "drivers/video/omap2/omapfb/Kconfig"
+source "drivers/video/omap2/displays/Kconfig"
diff --git a/drivers/video/omap2/Makefile b/drivers/video/omap2/Makefile
new file mode 100644
index 0000000..d853d05
--- /dev/null
+++ b/drivers/video/omap2/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_OMAP2_VRAM) += vram.o
+obj-$(CONFIG_OMAP2_VRFB) += vrfb.o
+
+obj-y += dss/
+obj-y += omapfb/
+obj-y += displays/
diff --git a/drivers/video/omap2/displays/Kconfig b/drivers/video/omap2/displays/Kconfig
new file mode 100644
index 0000000..b12a59c
--- /dev/null
+++ b/drivers/video/omap2/displays/Kconfig
@@ -0,0 +1,22 @@
+menu "OMAP2/3 Display Device Drivers"
+        depends on OMAP2_DSS
+
+config PANEL_GENERIC
+        tristate "Generic Panel"
+        help
+	  Generic panel driver.
+	  Used for DVI output for Beagle and OMAP3 SDP.
+
+config PANEL_SHARP_LS037V7DW01
+        tristate "Sharp LS037V7DW01 LCD Panel"
+        depends on OMAP2_DSS
+        help
+          LCD Panel used in TI's SDP3430 and EVM boards
+
+config PANEL_TAAL
+        tristate "Taal DSI Panel"
+        depends on OMAP2_DSS_DSI
+        help
+          Taal DSI command mode panel from TPO.
+
+endmenu
diff --git a/drivers/video/omap2/displays/Makefile b/drivers/video/omap2/displays/Makefile
new file mode 100644
index 0000000..9556464
--- /dev/null
+++ b/drivers/video/omap2/displays/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_PANEL_GENERIC) += panel-generic.o
+obj-$(CONFIG_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
+
+obj-$(CONFIG_PANEL_TAAL) += panel-taal.o
diff --git a/drivers/video/omap2/displays/panel-generic.c b/drivers/video/omap2/displays/panel-generic.c
new file mode 100644
index 0000000..eb48d1a
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-generic.c
@@ -0,0 +1,104 @@
+/*
+ * Generic panel support
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+
+#include <plat/display.h>
+
+static struct omap_video_timings generic_panel_timings = {
+	/* 640 x 480 @ 60 Hz  Reduced blanking VESA CVT 0.31M3-R */
+	.x_res		= 640,
+	.y_res		= 480,
+	.pixel_clock	= 23500,
+	.hfp		= 48,
+	.hsw		= 32,
+	.hbp		= 80,
+	.vfp		= 3,
+	.vsw		= 4,
+	.vbp		= 7,
+};
+
+static int generic_panel_probe(struct omap_dss_device *dssdev)
+{
+	dssdev->panel.config = OMAP_DSS_LCD_TFT;
+	dssdev->panel.timings = generic_panel_timings;
+
+	return 0;
+}
+
+static void generic_panel_remove(struct omap_dss_device *dssdev)
+{
+}
+
+static int generic_panel_enable(struct omap_dss_device *dssdev)
+{
+	int r = 0;
+
+	if (dssdev->platform_enable)
+		r = dssdev->platform_enable(dssdev);
+
+	return r;
+}
+
+static void generic_panel_disable(struct omap_dss_device *dssdev)
+{
+	if (dssdev->platform_disable)
+		dssdev->platform_disable(dssdev);
+}
+
+static int generic_panel_suspend(struct omap_dss_device *dssdev)
+{
+	generic_panel_disable(dssdev);
+	return 0;
+}
+
+static int generic_panel_resume(struct omap_dss_device *dssdev)
+{
+	return generic_panel_enable(dssdev);
+}
+
+static struct omap_dss_driver generic_driver = {
+	.probe		= generic_panel_probe,
+	.remove		= generic_panel_remove,
+
+	.enable		= generic_panel_enable,
+	.disable	= generic_panel_disable,
+	.suspend	= generic_panel_suspend,
+	.resume		= generic_panel_resume,
+
+	.driver         = {
+		.name   = "generic_panel",
+		.owner  = THIS_MODULE,
+	},
+};
+
+static int __init generic_panel_drv_init(void)
+{
+	return omap_dss_register_driver(&generic_driver);
+}
+
+static void __exit generic_panel_drv_exit(void)
+{
+	omap_dss_unregister_driver(&generic_driver);
+}
+
+module_init(generic_panel_drv_init);
+module_exit(generic_panel_drv_exit);
+MODULE_LICENSE("GPL");
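
As a quick sanity check on the CVT comment in this driver: the refresh rate implied by a timing set is the pixel clock divided by the total (active plus blanking) pixel count per frame. Here that is 23,500,000 / ((640 + 48 + 32 + 80) * (480 + 3 + 4 + 7)) = 23,500,000 / 395,200, about 59.5 Hz, which matches the reduced-blanking 60 Hz mode. As a helper (illustrative, not part of the driver):

	#include <plat/display.h>

	/* returns refresh in Hz, truncated; pixel_clock is in kHz */
	static unsigned long example_refresh_hz(const struct omap_video_timings *t)
	{
		unsigned long htotal = t->x_res + t->hfp + t->hsw + t->hbp;
		unsigned long vtotal = t->y_res + t->vfp + t->vsw + t->vbp;

		return t->pixel_clock * 1000UL / (htotal * vtotal);
	}
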
diff --git a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
new file mode 100644
index 0000000..bbe880bb
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
@@ -0,0 +1,153 @@
+/*
+ * LCD panel driver for Sharp LS037V7DW01
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+
+#include <plat/display.h>
+
+struct sharp_data {
+	/* XXX This regulator should actually be in SDP board file, not here,
+	 * as it doesn't actually power the LCD, but something else that
+	 * affects the output to LCD (I think. Somebody clarify). It doesn't do
+	 * harm here, as SDP is the only board using this currently */
+	struct regulator *vdvi_reg;
+};
+
+static struct omap_video_timings sharp_ls_timings = {
+	.x_res = 480,
+	.y_res = 640,
+
+	.pixel_clock	= 19200,
+
+	.hsw		= 2,
+	.hfp		= 1,
+	.hbp		= 28,
+
+	.vsw		= 1,
+	.vfp		= 1,
+	.vbp		= 1,
+};
+
+static int sharp_ls_panel_probe(struct omap_dss_device *dssdev)
+{
+	struct sharp_data *sd;
+
+	dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+		OMAP_DSS_LCD_IHS;
+	dssdev->panel.acb = 0x28;
+	dssdev->panel.timings = sharp_ls_timings;
+
+	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
+	if (!sd)
+		return -ENOMEM;
+
+	dev_set_drvdata(&dssdev->dev, sd);
+
+	sd->vdvi_reg = regulator_get(&dssdev->dev, "vdvi");
+	if (IS_ERR(sd->vdvi_reg)) {
+		kfree(sd);
+		pr_err("failed to get VDVI regulator\n");
+		return PTR_ERR(sd->vdvi_reg);
+	}
+
+	return 0;
+}
+
+static void sharp_ls_panel_remove(struct omap_dss_device *dssdev)
+{
+	struct sharp_data *sd = dev_get_drvdata(&dssdev->dev);
+
+	regulator_put(sd->vdvi_reg);
+
+	kfree(sd);
+}
+
+static int sharp_ls_panel_enable(struct omap_dss_device *dssdev)
+{
+	struct sharp_data *sd = dev_get_drvdata(&dssdev->dev);
+	int r = 0;
+
+	/* wait a couple of vsyncs before enabling the LCD */
+	msleep(50);
+
+	regulator_enable(sd->vdvi_reg);
+
+	if (dssdev->platform_enable)
+		r = dssdev->platform_enable(dssdev);
+
+	return r;
+}
+
+static void sharp_ls_panel_disable(struct omap_dss_device *dssdev)
+{
+	struct sharp_data *sd = dev_get_drvdata(&dssdev->dev);
+
+	if (dssdev->platform_disable)
+		dssdev->platform_disable(dssdev);
+
+	regulator_disable(sd->vdvi_reg);
+
+	/* wait at least 5 vsyncs after disabling the LCD */
+
+	msleep(100);
+}
+
+static int sharp_ls_panel_suspend(struct omap_dss_device *dssdev)
+{
+	sharp_ls_panel_disable(dssdev);
+	return 0;
+}
+
+static int sharp_ls_panel_resume(struct omap_dss_device *dssdev)
+{
+	return sharp_ls_panel_enable(dssdev);
+}
+
+static struct omap_dss_driver sharp_ls_driver = {
+	.probe		= sharp_ls_panel_probe,
+	.remove		= sharp_ls_panel_remove,
+
+	.enable		= sharp_ls_panel_enable,
+	.disable	= sharp_ls_panel_disable,
+	.suspend	= sharp_ls_panel_suspend,
+	.resume		= sharp_ls_panel_resume,
+
+	.driver         = {
+		.name   = "sharp_ls_panel",
+		.owner  = THIS_MODULE,
+	},
+};
+
+static int __init sharp_ls_panel_drv_init(void)
+{
+	return omap_dss_register_driver(&sharp_ls_driver);
+}
+
+static void __exit sharp_ls_panel_drv_exit(void)
+{
+	omap_dss_unregister_driver(&sharp_ls_driver);
+}
+
+module_init(sharp_ls_panel_drv_init);
+module_exit(sharp_ls_panel_drv_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
new file mode 100644
index 0000000..1f01dfc
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -0,0 +1,1002 @@
+/*
+ * Taal DSI command mode panel
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*#define DEBUG*/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+
+#include <plat/display.h>
+
+/* DSI Virtual channel. Hardcoded for now. */
+#define TCH 0
+
+#define DCS_READ_NUM_ERRORS	0x05
+#define DCS_READ_POWER_MODE	0x0a
+#define DCS_READ_MADCTL		0x0b
+#define DCS_READ_PIXEL_FORMAT	0x0c
+#define DCS_RDDSDR		0x0f
+#define DCS_SLEEP_IN		0x10
+#define DCS_SLEEP_OUT		0x11
+#define DCS_DISPLAY_OFF		0x28
+#define DCS_DISPLAY_ON		0x29
+#define DCS_COLUMN_ADDR		0x2a
+#define DCS_PAGE_ADDR		0x2b
+#define DCS_MEMORY_WRITE	0x2c
+#define DCS_TEAR_OFF		0x34
+#define DCS_TEAR_ON		0x35
+#define DCS_MEM_ACC_CTRL	0x36
+#define DCS_PIXEL_FORMAT	0x3a
+#define DCS_BRIGHTNESS		0x51
+#define DCS_CTRL_DISPLAY	0x53
+#define DCS_WRITE_CABC		0x55
+#define DCS_READ_CABC		0x56
+#define DCS_GET_ID1		0xda
+#define DCS_GET_ID2		0xdb
+#define DCS_GET_ID3		0xdc
+
+/* #define TAAL_USE_ESD_CHECK */
+#define TAAL_ESD_CHECK_PERIOD	msecs_to_jiffies(5000)
+
+struct taal_data {
+	struct backlight_device *bldev;
+
+	unsigned long	hw_guard_end;	/* next value of jiffies when we can
+					 * issue the next sleep in/out command
+					 */
+	unsigned long	hw_guard_wait;	/* max guard time in jiffies */
+
+	struct omap_dss_device *dssdev;
+
+	bool enabled;
+	u8 rotate;
+	bool mirror;
+
+	bool te_enabled;
+	bool use_ext_te;
+	struct completion te_completion;
+
+	bool use_dsi_bl;
+
+	bool cabc_broken;
+	unsigned cabc_mode;
+
+	bool intro_printed;
+
+	struct workqueue_struct *esd_wq;
+	struct delayed_work esd_work;
+};
+
+static void taal_esd_work(struct work_struct *work);
+
+static void hw_guard_start(struct taal_data *td, int guard_msec)
+{
+	td->hw_guard_wait = msecs_to_jiffies(guard_msec);
+	td->hw_guard_end = jiffies + td->hw_guard_wait;
+}
+
+static void hw_guard_wait(struct taal_data *td)
+{
+	unsigned long wait = td->hw_guard_end - jiffies;
+
+	if ((long)wait > 0 && wait <= td->hw_guard_wait) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(wait);
+	}
+}
+
+static int taal_dcs_read_1(u8 dcs_cmd, u8 *data)
+{
+	int r;
+	u8 buf[1];
+
+	r = dsi_vc_dcs_read(TCH, dcs_cmd, buf, 1);
+
+	if (r < 0)
+		return r;
+
+	*data = buf[0];
+
+	return 0;
+}
+
+static int taal_dcs_write_0(u8 dcs_cmd)
+{
+	return dsi_vc_dcs_write(TCH, &dcs_cmd, 1);
+}
+
+static int taal_dcs_write_1(u8 dcs_cmd, u8 param)
+{
+	u8 buf[2];
+	buf[0] = dcs_cmd;
+	buf[1] = param;
+	return dsi_vc_dcs_write(TCH, buf, 2);
+}
+
+static int taal_sleep_in(struct taal_data *td)
+{
+	u8 cmd;
+	int r;
+
+	hw_guard_wait(td);
+
+	cmd = DCS_SLEEP_IN;
+	r = dsi_vc_dcs_write_nosync(TCH, &cmd, 1);
+	if (r)
+		return r;
+
+	hw_guard_start(td, 120);
+
+	msleep(5);
+
+	return 0;
+}
+
+static int taal_sleep_out(struct taal_data *td)
+{
+	int r;
+
+	hw_guard_wait(td);
+
+	r = taal_dcs_write_0(DCS_SLEEP_OUT);
+	if (r)
+		return r;
+
+	hw_guard_start(td, 120);
+
+	msleep(5);
+
+	return 0;
+}
+
+static int taal_get_id(u8 *id1, u8 *id2, u8 *id3)
+{
+	int r;
+
+	r = taal_dcs_read_1(DCS_GET_ID1, id1);
+	if (r)
+		return r;
+	r = taal_dcs_read_1(DCS_GET_ID2, id2);
+	if (r)
+		return r;
+	r = taal_dcs_read_1(DCS_GET_ID3, id3);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+static int taal_set_addr_mode(u8 rotate, bool mirror)
+{
+	int r;
+	u8 mode;
+	int b5, b6, b7;
+
+	r = taal_dcs_read_1(DCS_READ_MADCTL, &mode);
+	if (r)
+		return r;
+
+	switch (rotate) {
+	default:
+	case 0:
+		b7 = 0;
+		b6 = 0;
+		b5 = 0;
+		break;
+	case 1:
+		b7 = 0;
+		b6 = 1;
+		b5 = 1;
+		break;
+	case 2:
+		b7 = 1;
+		b6 = 1;
+		b5 = 0;
+		break;
+	case 3:
+		b7 = 1;
+		b6 = 0;
+		b5 = 1;
+		break;
+	}
+
+	if (mirror)
+		b6 = !b6;
+
+	mode &= ~((1<<7) | (1<<6) | (1<<5));
+	mode |= (b7 << 7) | (b6 << 6) | (b5 << 5);
+
+	return taal_dcs_write_1(DCS_MEM_ACC_CTRL, mode);
+}
+
+static int taal_set_update_window(u16 x, u16 y, u16 w, u16 h)
+{
+	int r;
+	u16 x1 = x;
+	u16 x2 = x + w - 1;
+	u16 y1 = y;
+	u16 y2 = y + h - 1;
+
+	u8 buf[5];
+	buf[0] = DCS_COLUMN_ADDR;
+	buf[1] = (x1 >> 8) & 0xff;
+	buf[2] = (x1 >> 0) & 0xff;
+	buf[3] = (x2 >> 8) & 0xff;
+	buf[4] = (x2 >> 0) & 0xff;
+
+	r = dsi_vc_dcs_write_nosync(TCH, buf, sizeof(buf));
+	if (r)
+		return r;
+
+	buf[0] = DCS_PAGE_ADDR;
+	buf[1] = (y1 >> 8) & 0xff;
+	buf[2] = (y1 >> 0) & 0xff;
+	buf[3] = (y2 >> 8) & 0xff;
+	buf[4] = (y2 >> 0) & 0xff;
+
+	r = dsi_vc_dcs_write_nosync(TCH, buf, sizeof(buf));
+	if (r)
+		return r;
+
+	dsi_vc_send_bta_sync(TCH);
+
+	return r;
+}
+
+static int taal_bl_update_status(struct backlight_device *dev)
+{
+	struct omap_dss_device *dssdev = dev_get_drvdata(&dev->dev);
+	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+	int r;
+	int level;
+
+	if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+			dev->props.power == FB_BLANK_UNBLANK)
+		level = dev->props.brightness;
+	else
+		level = 0;
+
+	dev_dbg(&dssdev->dev, "update brightness to %d\n", level);
+
+	if (td->use_dsi_bl) {
+		if (td->enabled) {
+			dsi_bus_lock();
+			r = taal_dcs_write_1(DCS_BRIGHTNESS, level);
+			dsi_bus_unlock();
+			if (r)
+				return r;
+		}
+	} else {
+		if (!dssdev->set_backlight)
+			return -EINVAL;
+
+		r = dssdev->set_backlight(dssdev, level);
+		if (r)
+			return r;
+	}
+
+	return 0;
+}
+
+static int taal_bl_get_intensity(struct backlight_device *dev)
+{
+	if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+			dev->props.power == FB_BLANK_UNBLANK)
+		return dev->props.brightness;
+
+	return 0;
+}
+
+static struct backlight_ops taal_bl_ops = {
+	.get_brightness = taal_bl_get_intensity,
+	.update_status  = taal_bl_update_status,
+};
+
+static void taal_get_timings(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings)
+{
+	*timings = dssdev->panel.timings;
+}
+
+static void taal_get_resolution(struct omap_dss_device *dssdev,
+		u16 *xres, u16 *yres)
+{
+	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+
+	if (td->rotate == 0 || td->rotate == 2) {
+		*xres = dssdev->panel.timings.x_res;
+		*yres = dssdev->panel.timings.y_res;
+	} else {
+		*yres = dssdev->panel.timings.x_res;
+		*xres = dssdev->panel.timings.y_res;
+	}
+}
+
+static irqreturn_t taal_te_isr(int irq, void *data)
+{
+	struct omap_dss_device *dssdev = data;
+	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+
+	complete_all(&td->te_completion);
+
+	return IRQ_HANDLED;
+}
+
+static ssize_t taal_num_errors_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+	u8 errors;
+	int r;
+
+	if (td->enabled) {
+		dsi_bus_lock();
+		r = taal_dcs_read_1(DCS_READ_NUM_ERRORS, &errors);
+		dsi_bus_unlock();
+	} else {
+		r = -ENODEV;
+	}
+
+	if (r)
+		return r;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", errors);
+}
+
+static ssize_t taal_hw_revision_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+	u8 id1, id2, id3;
+	int r;
+
+	if (td->enabled) {
+		dsi_bus_lock();
+		r = taal_get_id(&id1, &id2, &id3);
+		dsi_bus_unlock();
+	} else {
+		r = -ENODEV;
+	}
+
+	if (r)
+		return r;
+
+	return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3);
+}
+
+static const char *cabc_modes[] = {
+	"off",		/* used also always when CABC is not supported */
+	"ui",
+	"still-image",
+	"moving-image",
+};
+
+static ssize_t show_cabc_mode(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+	const char *mode_str;
+	int mode;
+	int len;
+
+	mode = td->cabc_mode;
+
+	mode_str = "unknown";
+	if (mode >= 0 && mode < ARRAY_SIZE(cabc_modes))
+		mode_str = cabc_modes[mode];
+	len = snprintf(buf, PAGE_SIZE, "%s\n", mode_str);
+
+	return len < PAGE_SIZE - 1 ? len : PAGE_SIZE - 1;
+}
+
+static ssize_t store_cabc_mode(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) {
+		if (sysfs_streq(cabc_modes[i], buf))
+			break;
+	}
+
+	if (i == ARRAY_SIZE(cabc_modes))
+		return -EINVAL;
+
+	if (td->enabled) {
+		dsi_bus_lock();
+		if (!td->cabc_broken)
+			taal_dcs_write_1(DCS_WRITE_CABC, i);
+		dsi_bus_unlock();
+	}
+
+	td->cabc_mode = i;
+
+	return count;
+}
+
+static ssize_t show_cabc_available_modes(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int len;
+	int i;
+
+	for (i = 0, len = 0;
+	     len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++)
+		len += snprintf(&buf[len], PAGE_SIZE - len, "%s%s%s",
+			i ? " " : "", cabc_modes[i],
+			i == ARRAY_SIZE(cabc_modes) - 1 ? "\n" : "");
+
+	return len < PAGE_SIZE ? len : PAGE_SIZE - 1;
+}
+
+static DEVICE_ATTR(num_dsi_errors, S_IRUGO, taal_num_errors_show, NULL);
+static DEVICE_ATTR(hw_revision, S_IRUGO, taal_hw_revision_show, NULL);
+static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR,
+		show_cabc_mode, store_cabc_mode);
+static DEVICE_ATTR(cabc_available_modes, S_IRUGO,
+		show_cabc_available_modes, NULL);
+
+static struct attribute *taal_attrs[] = {
+	&dev_attr_num_dsi_errors.attr,
+	&dev_attr_hw_revision.attr,
+	&dev_attr_cabc_mode.attr,
+	&dev_attr_cabc_available_modes.attr,
+	NULL,
+};
+
+static struct attribute_group taal_attr_group = {
+	.attrs = taal_attrs,
+};
+
+static int taal_probe(struct omap_dss_device *dssdev)
+{
+	struct taal_data *td;
+	struct backlight_device *bldev;
+	int r;
+
+	const struct omap_video_timings taal_panel_timings = {
+		.x_res		= 864,
+		.y_res		= 480,
+	};
+
+	dev_dbg(&dssdev->dev, "probe\n");
+
+	dssdev->panel.config = OMAP_DSS_LCD_TFT;
+	dssdev->panel.timings = taal_panel_timings;
+	dssdev->ctrl.pixel_size = 24;
+
+	td = kzalloc(sizeof(*td), GFP_KERNEL);
+	if (!td) {
+		r = -ENOMEM;
+		goto err0;
+	}
+	td->dssdev = dssdev;
+
+	td->esd_wq = create_singlethread_workqueue("taal_esd");
+	if (td->esd_wq == NULL) {
+		dev_err(&dssdev->dev, "can't create ESD workqueue\n");
+		r = -ENOMEM;
+		goto err2;
+	}
+	INIT_DELAYED_WORK_DEFERRABLE(&td->esd_work, taal_esd_work);
+
+	dev_set_drvdata(&dssdev->dev, td);
+
+	dssdev->get_timings = taal_get_timings;
+	dssdev->get_resolution = taal_get_resolution;
+
+	/* if no platform set_backlight() defined, presume DSI backlight
+	 * control */
+	if (!dssdev->set_backlight)
+		td->use_dsi_bl = true;
+
+	bldev = backlight_device_register("taal", &dssdev->dev, dssdev,
+			&taal_bl_ops);
+	if (IS_ERR(bldev)) {
+		r = PTR_ERR(bldev);
+		goto err1;
+	}
+
+	td->bldev = bldev;
+
+	bldev->props.fb_blank = FB_BLANK_UNBLANK;
+	bldev->props.power = FB_BLANK_UNBLANK;
+	if (td->use_dsi_bl) {
+		bldev->props.max_brightness = 255;
+		bldev->props.brightness = 255;
+	} else {
+		bldev->props.max_brightness = 127;
+		bldev->props.brightness = 127;
+	}
+
+	taal_bl_update_status(bldev);
+
+	if (dssdev->phy.dsi.ext_te) {
+		int gpio = dssdev->phy.dsi.ext_te_gpio;
+
+		r = gpio_request(gpio, "taal irq");
+		if (r) {
+			dev_err(&dssdev->dev, "GPIO request failed\n");
+			goto err3;
+		}
+
+		gpio_direction_input(gpio);
+
+		r = request_irq(gpio_to_irq(gpio), taal_te_isr,
+				IRQF_DISABLED | IRQF_TRIGGER_RISING,
+				"taal vsync", dssdev);
+
+		if (r) {
+			dev_err(&dssdev->dev, "IRQ request failed\n");
+			gpio_free(gpio);
+			goto err3;
+		}
+
+		init_completion(&td->te_completion);
+
+		td->use_ext_te = true;
+	}
+
+	r = sysfs_create_group(&dssdev->dev.kobj, &taal_attr_group);
+	if (r) {
+		dev_err(&dssdev->dev, "failed to create sysfs files\n");
+		goto err4;
+	}
+
+	return 0;
+err4:
+	if (td->use_ext_te) {
+		int gpio = dssdev->phy.dsi.ext_te_gpio;
+		free_irq(gpio_to_irq(gpio), dssdev);
+		gpio_free(gpio);
+	}
+err3:
+	backlight_device_unregister(bldev);
+err2:
+	cancel_delayed_work_sync(&td->esd_work);
+	destroy_workqueue(td->esd_wq);
+err1:
+	kfree(td);
+err0:
+	return r;
+}
+
+static void taal_remove(struct omap_dss_device *dssdev)
+{
+	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+	struct backlight_device *bldev;
+
+	dev_dbg(&dssdev->dev, "remove\n");
+
+	sysfs_remove_group(&dssdev->dev.kobj, &taal_attr_group);
+
+	if (td->use_ext_te) {
+		int gpio = dssdev->phy.dsi.ext_te_gpio;
+		free_irq(gpio_to_irq(gpio), dssdev);
+		gpio_free(gpio);
+	}
+
+	bldev = td->bldev;
+	bldev->props.power = FB_BLANK_POWERDOWN;
+	taal_bl_update_status(bldev);
+	backlight_device_unregister(bldev);
+
+	cancel_delayed_work_sync(&td->esd_work);
+	destroy_workqueue(td->esd_wq);
+
+	kfree(td);
+}
+
+static int taal_enable(struct omap_dss_device *dssdev)
+{
+	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+	u8 id1, id2, id3;
+	int r;
+
+	dev_dbg(&dssdev->dev, "enable\n");
+
+	if (dssdev->platform_enable) {
+		r = dssdev->platform_enable(dssdev);
+		if (r)
+			return r;
+	}
+
+	/* it seems we have to wait a bit until taal is ready */
+	msleep(5);
+
+	r = taal_sleep_out(td);
+	if (r)
+		goto err;
+
+	r = taal_get_id(&id1, &id2, &id3);
+	if (r)
+		goto err;
+
+	/* on early revisions CABC is broken */
+	if (id2 == 0x00 || id2 == 0xff || id2 == 0x81)
+		td->cabc_broken = true;
+
+	taal_dcs_write_1(DCS_BRIGHTNESS, 0xff);
+	taal_dcs_write_1(DCS_CTRL_DISPLAY, (1<<2) | (1<<5)); /* BL | BCTRL */
+
+	taal_dcs_write_1(DCS_PIXEL_FORMAT, 0x7); /* 24bit/pixel */
+
+	taal_set_addr_mode(td->rotate, td->mirror);
+	if (!td->cabc_broken)
+		taal_dcs_write_1(DCS_WRITE_CABC, td->cabc_mode);
+
+	taal_dcs_write_0(DCS_DISPLAY_ON);
+
+#ifdef TAAL_USE_ESD_CHECK
+	queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD);
+#endif
+
+	td->enabled = 1;
+
+	if (!td->intro_printed) {
+		dev_info(&dssdev->dev, "revision %02x.%02x.%02x\n",
+				id1, id2, id3);
+		if (td->cabc_broken)
+			dev_info(&dssdev->dev,
+					"old Taal version, CABC disabled\n");
+		td->intro_printed = true;
+	}
+
+	return 0;
+err:
+	if (dssdev->platform_disable)
+		dssdev->platform_disable(dssdev);
+
+	return r;
+}
+
+static void taal_disable(struct omap_dss_device *dssdev)
+{
+	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+
+	dev_dbg(&dssdev->dev, "disable\n");
+
+	cancel_delayed_work(&td->esd_work);
+
+	taal_dcs_write_0(DCS_DISPLAY_OFF);
+	taal_sleep_in(td);
+
+	/* wait a bit so that the message goes through */
+	msleep(10);
+
+	if (dssdev->platform_disable)
+		dssdev->platform_disable(dssdev);
+
+	td->enabled = 0;
+}
+
+static int taal_suspend(struct omap_dss_device *dssdev)
+{
+	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+	struct backlight_device *bldev = td->bldev;
+
+	bldev->props.power = FB_BLANK_POWERDOWN;
+	taal_bl_update_status(bldev);
+
+	return 0;
+}
+
+static int taal_resume(struct omap_dss_device *dssdev)
+{
+	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+	struct backlight_device *bldev = td->bldev;
+
+	bldev->props.power = FB_BLANK_UNBLANK;
+	taal_bl_update_status(bldev);
+
+	return 0;
+}
+
+static void taal_setup_update(struct omap_dss_device *dssdev,
+				    u16 x, u16 y, u16 w, u16 h)
+{
+	taal_set_update_window(x, y, w, h);
+}
+
+static int taal_enable_te(struct omap_dss_device *dssdev, bool enable)
+{
+	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+	int r;
+
+	td->te_enabled = enable;
+
+	if (enable)
+		r = taal_dcs_write_1(DCS_TEAR_ON, 0);
+	else
+		r = taal_dcs_write_0(DCS_TEAR_OFF);
+
+	return r;
+}
+
+static int taal_wait_te(struct omap_dss_device *dssdev)
+{
+	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+	long wait = msecs_to_jiffies(500);
+
+	if (!td->use_ext_te || !td->te_enabled)
+		return 0;
+
+	INIT_COMPLETION(td->te_completion);
+	wait = wait_for_completion_timeout(&td->te_completion, wait);
+	if (wait == 0) {
+		dev_err(&dssdev->dev, "timeout waiting TE\n");
+		return -ETIME;
+	}
+
+	return 0;
+}
+
+static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
+{
+	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+	int r;
+
+	dev_dbg(&dssdev->dev, "rotate %d\n", rotate);
+
+	if (td->enabled) {
+		r = taal_set_addr_mode(rotate, td->mirror);
+
+		if (r)
+			return r;
+	}
+
+	td->rotate = rotate;
+
+	return 0;
+}
+
+static u8 taal_get_rotate(struct omap_dss_device *dssdev)
+{
+	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+	return td->rotate;
+}
+
+static int taal_mirror(struct omap_dss_device *dssdev, bool enable)
+{
+	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+	int r;
+
+	dev_dbg(&dssdev->dev, "mirror %d\n", enable);
+
+	if (td->enabled) {
+		r = taal_set_addr_mode(td->rotate, enable);
+
+		if (r)
+			return r;
+	}
+
+	td->mirror = enable;
+
+	return 0;
+}
+
+static bool taal_get_mirror(struct omap_dss_device *dssdev)
+{
+	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+	return td->mirror;
+}
+
+static int taal_run_test(struct omap_dss_device *dssdev, int test_num)
+{
+	u8 id1, id2, id3;
+	int r;
+
+	r = taal_dcs_read_1(DCS_GET_ID1, &id1);
+	if (r)
+		return r;
+	r = taal_dcs_read_1(DCS_GET_ID2, &id2);
+	if (r)
+		return r;
+	r = taal_dcs_read_1(DCS_GET_ID3, &id3);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+static int taal_memory_read(struct omap_dss_device *dssdev,
+		void *buf, size_t size,
+		u16 x, u16 y, u16 w, u16 h)
+{
+	int r;
+	int first = 1;
+	int plen;
+	unsigned buf_used = 0;
+
+	if (size < w * h * 3)
+		return -ENOMEM;
+
+	size = min(w * h * 3,
+			dssdev->panel.timings.x_res *
+			dssdev->panel.timings.y_res * 3);
+
+	/* plen 1 or 2 goes into a short packet. Until the checksum error
+	 * is fixed, use short packets. plen 32 works, but bigger packets
+	 * seem to cause an error. */
+	if (size % 2)
+		plen = 1;
+	else
+		plen = 2;
+
+	taal_setup_update(dssdev, x, y, w, h);
+
+	r = dsi_vc_set_max_rx_packet_size(TCH, plen);
+	if (r)
+		return r;
+
+	while (buf_used < size) {
+		u8 dcs_cmd = first ? 0x2e : 0x3e;
+		first = 0;
+
+		r = dsi_vc_dcs_read(TCH, dcs_cmd,
+				buf + buf_used, size - buf_used);
+
+		if (r < 0) {
+			dev_err(&dssdev->dev, "read error\n");
+			goto err;
+		}
+
+		buf_used += r;
+
+		if (r < plen) {
+			dev_err(&dssdev->dev, "short read\n");
+			break;
+		}
+
+		if (signal_pending(current)) {
+			dev_err(&dssdev->dev, "signal pending, "
+					"aborting memory read\n");
+			r = -ERESTARTSYS;
+			goto err;
+		}
+	}
+
+	r = buf_used;
+
+err:
+	dsi_vc_set_max_rx_packet_size(TCH, 1);
+
+	return r;
+}
+
+static void taal_esd_work(struct work_struct *work)
+{
+	struct taal_data *td = container_of(work, struct taal_data,
+			esd_work.work);
+	struct omap_dss_device *dssdev = td->dssdev;
+	u8 state1, state2;
+	int r;
+
+	if (!td->enabled)
+		return;
+
+	dsi_bus_lock();
+
+	r = taal_dcs_read_1(DCS_RDDSDR, &state1);
+	if (r) {
+		dev_err(&dssdev->dev, "failed to read Taal status\n");
+		goto err;
+	}
+
+	/* Run self diagnostics */
+	r = taal_sleep_out(td);
+	if (r) {
+		dev_err(&dssdev->dev, "failed to run Taal self-diagnostics\n");
+		goto err;
+	}
+
+	r = taal_dcs_read_1(DCS_RDDSDR, &state2);
+	if (r) {
+		dev_err(&dssdev->dev, "failed to read Taal status\n");
+		goto err;
+	}
+
+	/* Each sleep out command will trigger a self diagnostic and flip
+	 * Bit6 if the test passes.
+	 */
+	if (!((state1 ^ state2) & (1 << 6))) {
+		dev_err(&dssdev->dev, "LCD self diagnostics failed\n");
+		goto err;
+	}
+	/* Self-diagnostics result is also shown on TE GPIO line. We need
+	 * to re-enable TE after self diagnostics */
+	if (td->use_ext_te && td->te_enabled)
+		taal_enable_te(dssdev, true);
+
+	dsi_bus_unlock();
+
+	queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD);
+
+	return;
+err:
+	dev_err(&dssdev->dev, "performing LCD reset\n");
+
+	taal_disable(dssdev);
+	taal_enable(dssdev);
+
+	dsi_bus_unlock();
+
+	queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD);
+}
+
+static struct omap_dss_driver taal_driver = {
+	.probe		= taal_probe,
+	.remove		= taal_remove,
+
+	.enable		= taal_enable,
+	.disable	= taal_disable,
+	.suspend	= taal_suspend,
+	.resume		= taal_resume,
+
+	.setup_update	= taal_setup_update,
+	.enable_te	= taal_enable_te,
+	.wait_for_te	= taal_wait_te,
+	.set_rotate	= taal_rotate,
+	.get_rotate	= taal_get_rotate,
+	.set_mirror	= taal_mirror,
+	.get_mirror	= taal_get_mirror,
+	.run_test	= taal_run_test,
+	.memory_read	= taal_memory_read,
+
+	.driver         = {
+		.name   = "taal",
+		.owner  = THIS_MODULE,
+	},
+};
+
+static int __init taal_init(void)
+{
+	omap_dss_register_driver(&taal_driver);
+
+	return 0;
+}
+
+static void __exit taal_exit(void)
+{
+	omap_dss_unregister_driver(&taal_driver);
+}
+
+module_init(taal_init);
+module_exit(taal_exit);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>");
+MODULE_DESCRIPTION("Taal Driver");
+MODULE_LICENSE("GPL");
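
One pattern in the driver above deserves a note: the panel requires a guard time (120 ms here) after a sleep in/out command before the next one may be issued, and hw_guard_start()/hw_guard_wait() enforce that without sleeping eagerly. The deadline is recorded when a command is sent, and a later command blocks only if it arrives before the deadline. The mechanism in isolation (a condensed sketch; the driver spells out set_current_state() plus schedule_timeout()):

	#include <linux/jiffies.h>
	#include <linux/sched.h>

	struct guard {
		unsigned long end;	/* deadline, in jiffies */
		unsigned long span;	/* guard length, in jiffies */
	};

	static void guard_start(struct guard *g, int msec)
	{
		g->span = msecs_to_jiffies(msec);
		g->end = jiffies + g->span;
	}

	static void guard_wait(struct guard *g)
	{
		unsigned long wait = g->end - jiffies;

		/* block only while the deadline is ahead; the second test
		 * rejects an implausibly distant (stale) deadline */
		if ((long)wait > 0 && wait <= g->span)
			schedule_timeout_uninterruptible(wait);
	}
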
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
new file mode 100644
index 0000000..71d8dec
--- /dev/null
+++ b/drivers/video/omap2/dss/Kconfig
@@ -0,0 +1,89 @@
+menuconfig OMAP2_DSS
+        tristate "OMAP2/3 Display Subsystem support (EXPERIMENTAL)"
+        depends on ARCH_OMAP2 || ARCH_OMAP3
+        help
+          OMAP2/3 Display Subsystem support.
+
+if OMAP2_DSS
+
+config OMAP2_VRAM_SIZE
+	int "VRAM size (MB)"
+	range 0 32
+	default 0
+	help
+	  The amount of SDRAM to reserve at boot time for video RAM use.
+	  This VRAM will be used by omapfb and other drivers that need
+	  a large contiguous RAM area for video use.
+
+	  You can also set this with the "vram=<bytes>" kernel argument,
+	  or in the board file.
+
+config OMAP2_DSS_DEBUG_SUPPORT
+        bool "Debug support"
+	default y
+	help
+	  This enables debug messages. You also need to enable printing
+	  with the 'debug' module parameter.
+
+config OMAP2_DSS_RFBI
+	bool "RFBI support"
+	default n
+	help
+	  MIPI DBI, or RFBI (Remote Framebuffer Interface), support.
+
+config OMAP2_DSS_VENC
+	bool "VENC support"
+	default y
+	help
+	  OMAP Video Encoder support.
+
+config OMAP2_DSS_SDI
+	bool "SDI support"
+	depends on ARCH_OMAP3
+	default n
+	help
+	  SDI (Serial Display Interface) support.
+
+config OMAP2_DSS_DSI
+	bool "DSI support"
+	depends on ARCH_OMAP3
+	default n
+	help
+	  MIPI DSI support.
+
+config OMAP2_DSS_USE_DSI_PLL
+	bool "Use DSI PLL for PCLK (EXPERIMENTAL)"
+	default n
+	depends on OMAP2_DSS_DSI
+	help
+	  Use DSI PLL to generate pixel clock.  Currently only for DPI output.
+	  DSI PLL can be used to generate higher and more precise pixel clocks.
+
+config OMAP2_DSS_FAKE_VSYNC
+	bool "Fake VSYNC irq from manual update displays"
+	default n
+	help
+	  If this is selected, DSI will generate a fake DISPC VSYNC interrupt
+	  when DSI has sent a frame. This is only needed with DSI or RFBI
+	  displays using manual update mode, when you want VSYNC to, for
+	  example, time animation.
+
+config OMAP2_DSS_MIN_FCK_PER_PCK
+	int "Minimum FCK/PCK ratio (for scaling)"
+	range 0 32
+	default 0
+	help
+	  This can be used to adjust the minimum FCK/PCK ratio.
+
+	  With this you can make sure that DISPC FCK is at least
+	  n x PCK. Video plane scaling requires a higher FCK than
+	  normal.
+
+	  If this is set to 0, there's no extra constraint on the
+	  DISPC FCK. However, the FCK will at minimum be
+	  2xPCK (if active matrix) or 3xPCK (if passive matrix).
+
+	  Max FCK is 173MHz, so this doesn't work if your PCK
+	  is very high.
+
+endif
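
To make the last help text concrete: with OMAP2_DSS_MIN_FCK_PER_PCK=3 and a 54 MHz pixel clock, DISPC FCK must reach 162 MHz, which still fits under the 173 MHz cap; at a 60 MHz pixel clock the requirement becomes 180 MHz and can no longer be satisfied. The constraint itself is just (illustrative helper, not driver code):

	/* all rates in Hz; returns nonzero if the ratio can be met */
	static int example_fck_ok(unsigned long pck, unsigned long max_fck,
				  unsigned int min_fck_per_pck)
	{
		return min_fck_per_pck * pck <= max_fck;
	}
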
diff --git a/drivers/video/omap2/dss/Makefile b/drivers/video/omap2/dss/Makefile
new file mode 100644
index 0000000..980c72c
--- /dev/null
+++ b/drivers/video/omap2/dss/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_OMAP2_DSS) += omapdss.o
+omapdss-y := core.o dss.o dispc.o dpi.o display.o manager.o overlay.o
+omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o
+omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
+omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o
+omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
new file mode 100644
index 0000000..29497a0
--- /dev/null
+++ b/drivers/video/omap2/dss/core.c
@@ -0,0 +1,919 @@
+/*
+ * linux/drivers/video/omap2/dss/core.c
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "CORE"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/device.h>
+
+#include <plat/display.h>
+#include <plat/clock.h>
+
+#include "dss.h"
+
+static struct {
+	struct platform_device *pdev;
+	int		ctx_id;
+
+	struct clk      *dss_ick;
+	struct clk	*dss1_fck;
+	struct clk	*dss2_fck;
+	struct clk      *dss_54m_fck;
+	struct clk	*dss_96m_fck;
+	unsigned	num_clks_enabled;
+} core;
+
+static void dss_clk_enable_all_no_ctx(void);
+static void dss_clk_disable_all_no_ctx(void);
+static void dss_clk_enable_no_ctx(enum dss_clock clks);
+static void dss_clk_disable_no_ctx(enum dss_clock clks);
+
+static char *def_disp_name;
+module_param_named(def_disp, def_disp_name, charp, 0);
+MODULE_PARM_DESC(def_disp, "default display name");
+
+#ifdef DEBUG
+unsigned int dss_debug;
+module_param_named(debug, dss_debug, bool, 0644);
+#endif
+
+/* CONTEXT */
+static int dss_get_ctx_id(void)
+{
+	struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
+	int r;
+
+	if (!pdata->get_last_off_on_transaction_id)
+		return 0;
+	r = pdata->get_last_off_on_transaction_id(&core.pdev->dev);
+	if (r < 0) {
+		dev_err(&core.pdev->dev, "getting transaction ID failed, "
+				"will force context restore\n");
+		r = -1;
+	}
+	return r;
+}
+
+int dss_need_ctx_restore(void)
+{
+	int id = dss_get_ctx_id();
+
+	if (id < 0 || id != core.ctx_id) {
+		DSSDBG("ctx id %d -> id %d\n",
+				core.ctx_id, id);
+		core.ctx_id = id;
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+static void save_all_ctx(void)
+{
+	DSSDBG("save context\n");
+
+	dss_clk_enable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+	dss_save_context();
+	dispc_save_context();
+#ifdef CONFIG_OMAP2_DSS_DSI
+	dsi_save_context();
+#endif
+
+	dss_clk_disable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK1);
+}
+
+static void restore_all_ctx(void)
+{
+	DSSDBG("restore context\n");
+
+	dss_clk_enable_all_no_ctx();
+
+	dss_restore_context();
+	dispc_restore_context();
+#ifdef CONFIG_OMAP2_DSS_DSI
+	dsi_restore_context();
+#endif
+
+	dss_clk_disable_all_no_ctx();
+}
+
+/* CLOCKS */
+static void core_dump_clocks(struct seq_file *s)
+{
+	int i;
+	struct clk *clocks[5] = {
+		core.dss_ick,
+		core.dss1_fck,
+		core.dss2_fck,
+		core.dss_54m_fck,
+		core.dss_96m_fck
+	};
+
+	seq_printf(s, "- CORE -\n");
+
+	seq_printf(s, "internal clk count\t\t%u\n", core.num_clks_enabled);
+
+	for (i = 0; i < 5; i++) {
+		if (!clocks[i])
+			continue;
+		seq_printf(s, "%-15s\t%lu\t%d\n",
+				clocks[i]->name,
+				clk_get_rate(clocks[i]),
+				clocks[i]->usecount);
+	}
+}
+
+static int dss_get_clock(struct clk **clock, const char *clk_name)
+{
+	struct clk *clk;
+
+	clk = clk_get(&core.pdev->dev, clk_name);
+
+	if (IS_ERR(clk)) {
+		DSSERR("can't get clock %s", clk_name);
+		return PTR_ERR(clk);
+	}
+
+	*clock = clk;
+
+	DSSDBG("clk %s, rate %ld\n", clk_name, clk_get_rate(clk));
+
+	return 0;
+}
+
+static int dss_get_clocks(void)
+{
+	int r;
+
+	core.dss_ick = NULL;
+	core.dss1_fck = NULL;
+	core.dss2_fck = NULL;
+	core.dss_54m_fck = NULL;
+	core.dss_96m_fck = NULL;
+
+	r = dss_get_clock(&core.dss_ick, "ick");
+	if (r)
+		goto err;
+
+	r = dss_get_clock(&core.dss1_fck, "dss1_fck");
+	if (r)
+		goto err;
+
+	r = dss_get_clock(&core.dss2_fck, "dss2_fck");
+	if (r)
+		goto err;
+
+	r = dss_get_clock(&core.dss_54m_fck, "tv_fck");
+	if (r)
+		goto err;
+
+	r = dss_get_clock(&core.dss_96m_fck, "video_fck");
+	if (r)
+		goto err;
+
+	return 0;
+
+err:
+	if (core.dss_ick)
+		clk_put(core.dss_ick);
+	if (core.dss1_fck)
+		clk_put(core.dss1_fck);
+	if (core.dss2_fck)
+		clk_put(core.dss2_fck);
+	if (core.dss_54m_fck)
+		clk_put(core.dss_54m_fck);
+	if (core.dss_96m_fck)
+		clk_put(core.dss_96m_fck);
+
+	return r;
+}
+
+static void dss_put_clocks(void)
+{
+	if (core.dss_96m_fck)
+		clk_put(core.dss_96m_fck);
+	clk_put(core.dss_54m_fck);
+	clk_put(core.dss1_fck);
+	clk_put(core.dss2_fck);
+	clk_put(core.dss_ick);
+}
+
+unsigned long dss_clk_get_rate(enum dss_clock clk)
+{
+	switch (clk) {
+	case DSS_CLK_ICK:
+		return clk_get_rate(core.dss_ick);
+	case DSS_CLK_FCK1:
+		return clk_get_rate(core.dss1_fck);
+	case DSS_CLK_FCK2:
+		return clk_get_rate(core.dss2_fck);
+	case DSS_CLK_54M:
+		return clk_get_rate(core.dss_54m_fck);
+	case DSS_CLK_96M:
+		return clk_get_rate(core.dss_96m_fck);
+	}
+
+	BUG();
+	return 0;
+}
+
+static unsigned count_clk_bits(enum dss_clock clks)
+{
+	unsigned num_clks = 0;
+
+	if (clks & DSS_CLK_ICK)
+		++num_clks;
+	if (clks & DSS_CLK_FCK1)
+		++num_clks;
+	if (clks & DSS_CLK_FCK2)
+		++num_clks;
+	if (clks & DSS_CLK_54M)
+		++num_clks;
+	if (clks & DSS_CLK_96M)
+		++num_clks;
+
+	return num_clks;
+}
+
+static void dss_clk_enable_no_ctx(enum dss_clock clks)
+{
+	unsigned num_clks = count_clk_bits(clks);
+
+	if (clks & DSS_CLK_ICK)
+		clk_enable(core.dss_ick);
+	if (clks & DSS_CLK_FCK1)
+		clk_enable(core.dss1_fck);
+	if (clks & DSS_CLK_FCK2)
+		clk_enable(core.dss2_fck);
+	if (clks & DSS_CLK_54M)
+		clk_enable(core.dss_54m_fck);
+	if (clks & DSS_CLK_96M)
+		clk_enable(core.dss_96m_fck);
+
+	core.num_clks_enabled += num_clks;
+}
+
+void dss_clk_enable(enum dss_clock clks)
+{
+	dss_clk_enable_no_ctx(clks);
+
+	if (cpu_is_omap34xx() && dss_need_ctx_restore())
+		restore_all_ctx();
+}
+
+static void dss_clk_disable_no_ctx(enum dss_clock clks)
+{
+	unsigned num_clks = count_clk_bits(clks);
+
+	if (clks & DSS_CLK_ICK)
+		clk_disable(core.dss_ick);
+	if (clks & DSS_CLK_FCK1)
+		clk_disable(core.dss1_fck);
+	if (clks & DSS_CLK_FCK2)
+		clk_disable(core.dss2_fck);
+	if (clks & DSS_CLK_54M)
+		clk_disable(core.dss_54m_fck);
+	if (clks & DSS_CLK_96M)
+		clk_disable(core.dss_96m_fck);
+
+	core.num_clks_enabled -= num_clks;
+}
+
+void dss_clk_disable(enum dss_clock clks)
+{
+	if (cpu_is_omap34xx()) {
+		unsigned num_clks = count_clk_bits(clks);
+
+		BUG_ON(core.num_clks_enabled < num_clks);
+
+		if (core.num_clks_enabled == num_clks)
+			save_all_ctx();
+	}
+
+	dss_clk_disable_no_ctx(clks);
+}
+
+static void dss_clk_enable_all_no_ctx(void)
+{
+	enum dss_clock clks;
+
+	clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M;
+	if (cpu_is_omap34xx())
+		clks |= DSS_CLK_96M;
+	dss_clk_enable_no_ctx(clks);
+}
+
+static void dss_clk_disable_all_no_ctx(void)
+{
+	enum dss_clock clks;
+
+	clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M;
+	if (cpu_is_omap34xx())
+		clks |= DSS_CLK_96M;
+	dss_clk_disable_no_ctx(clks);
+}
+
+static void dss_clk_disable_all(void)
+{
+	enum dss_clock clks;
+
+	clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M;
+	if (cpu_is_omap34xx())
+		clks |= DSS_CLK_96M;
+	dss_clk_disable(clks);
+}
+
+/* DEBUGFS */
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
+static void dss_debug_dump_clocks(struct seq_file *s)
+{
+	core_dump_clocks(s);
+	dss_dump_clocks(s);
+	dispc_dump_clocks(s);
+#ifdef CONFIG_OMAP2_DSS_DSI
+	dsi_dump_clocks(s);
+#endif
+}
+
+static int dss_debug_show(struct seq_file *s, void *unused)
+{
+	void (*func)(struct seq_file *) = s->private;
+	func(s);
+	return 0;
+}
+
+static int dss_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dss_debug_show, inode->i_private);
+}
+
+static const struct file_operations dss_debug_fops = {
+	.open           = dss_debug_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = single_release,
+};
+
+static struct dentry *dss_debugfs_dir;
+
+static int dss_initialize_debugfs(void)
+{
+	dss_debugfs_dir = debugfs_create_dir("omapdss", NULL);
+	if (IS_ERR(dss_debugfs_dir)) {
+		int err = PTR_ERR(dss_debugfs_dir);
+		dss_debugfs_dir = NULL;
+		return err;
+	}
+
+	debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir,
+			&dss_debug_dump_clocks, &dss_debug_fops);
+
+	debugfs_create_file("dss", S_IRUGO, dss_debugfs_dir,
+			&dss_dump_regs, &dss_debug_fops);
+	debugfs_create_file("dispc", S_IRUGO, dss_debugfs_dir,
+			&dispc_dump_regs, &dss_debug_fops);
+#ifdef CONFIG_OMAP2_DSS_RFBI
+	debugfs_create_file("rfbi", S_IRUGO, dss_debugfs_dir,
+			&rfbi_dump_regs, &dss_debug_fops);
+#endif
+#ifdef CONFIG_OMAP2_DSS_DSI
+	debugfs_create_file("dsi", S_IRUGO, dss_debugfs_dir,
+			&dsi_dump_regs, &dss_debug_fops);
+#endif
+#ifdef CONFIG_OMAP2_DSS_VENC
+	debugfs_create_file("venc", S_IRUGO, dss_debugfs_dir,
+			&venc_dump_regs, &dss_debug_fops);
+#endif
+	return 0;
+}
+
+static void dss_uninitialize_debugfs(void)
+{
+	if (dss_debugfs_dir)
+		debugfs_remove_recursive(dss_debugfs_dir);
+}
+#endif /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
+
+/* PLATFORM DEVICE */
+static int omap_dss_probe(struct platform_device *pdev)
+{
+	struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+	int skip_init = 0;
+	int r;
+	int i;
+
+	core.pdev = pdev;
+
+	dss_init_overlay_managers(pdev);
+	dss_init_overlays(pdev);
+
+	r = dss_get_clocks();
+	if (r)
+		goto fail0;
+
+	dss_clk_enable_all_no_ctx();
+
+	core.ctx_id = dss_get_ctx_id();
+	DSSDBG("initial ctx id %u\n", core.ctx_id);
+
+#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
+	/* DISPC_CONTROL */
+	if (omap_readl(0x48050440) & 1)	/* LCD enabled? */
+		skip_init = 1;
+#endif
+
+	r = dss_init(skip_init);
+	if (r) {
+		DSSERR("Failed to initialize DSS\n");
+		goto fail0;
+	}
+
+#ifdef CONFIG_OMAP2_DSS_RFBI
+	r = rfbi_init();
+	if (r) {
+		DSSERR("Failed to initialize rfbi\n");
+		goto fail0;
+	}
+#endif
+
+	r = dpi_init();
+	if (r) {
+		DSSERR("Failed to initialize dpi\n");
+		goto fail0;
+	}
+
+	r = dispc_init();
+	if (r) {
+		DSSERR("Failed to initialize dispc\n");
+		goto fail0;
+	}
+#ifdef CONFIG_OMAP2_DSS_VENC
+	r = venc_init(pdev);
+	if (r) {
+		DSSERR("Failed to initialize venc\n");
+		goto fail0;
+	}
+#endif
+	if (cpu_is_omap34xx()) {
+#ifdef CONFIG_OMAP2_DSS_SDI
+		r = sdi_init(skip_init);
+		if (r) {
+			DSSERR("Failed to initialize SDI\n");
+			goto fail0;
+		}
+#endif
+#ifdef CONFIG_OMAP2_DSS_DSI
+		r = dsi_init(pdev);
+		if (r) {
+			DSSERR("Failed to initialize DSI\n");
+			goto fail0;
+		}
+#endif
+	}
+
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
+	r = dss_initialize_debugfs();
+	if (r)
+		goto fail0;
+#endif
+
+	for (i = 0; i < pdata->num_devices; ++i) {
+		struct omap_dss_device *dssdev = pdata->devices[i];
+
+		r = omap_dss_register_device(dssdev);
+		if (r)
+			DSSERR("device reg failed %d\n", i);
+
+		if (def_disp_name && strcmp(def_disp_name, dssdev->name) == 0)
+			pdata->default_device = dssdev;
+	}
+
+	dss_clk_disable_all();
+
+	return 0;
+
+	/* XXX fail correctly */
+fail0:
+	return r;
+}
+
+static int omap_dss_remove(struct platform_device *pdev)
+{
+	struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+	int i;
+	int c;
+
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
+	dss_uninitialize_debugfs();
+#endif
+
+#ifdef CONFIG_OMAP2_DSS_VENC
+	venc_exit();
+#endif
+	dispc_exit();
+	dpi_exit();
+#ifdef CONFIG_OMAP2_DSS_RFBI
+	rfbi_exit();
+#endif
+	if (cpu_is_omap34xx()) {
+#ifdef CONFIG_OMAP2_DSS_DSI
+		dsi_exit();
+#endif
+#ifdef CONFIG_OMAP2_DSS_SDI
+		sdi_exit();
+#endif
+	}
+
+	dss_exit();
+
+	/* these should be removed at some point */
+	c = core.dss_ick->usecount;
+	if (c > 0) {
+		DSSERR("warning: dss_ick usecount %d, disabling\n", c);
+		while (c-- > 0)
+			clk_disable(core.dss_ick);
+	}
+
+	c = core.dss1_fck->usecount;
+	if (c > 0) {
+		DSSERR("warning: dss1_fck usecount %d, disabling\n", c);
+		while (c-- > 0)
+			clk_disable(core.dss1_fck);
+	}
+
+	c = core.dss2_fck->usecount;
+	if (c > 0) {
+		DSSERR("warning: dss2_fck usecount %d, disabling\n", c);
+		while (c-- > 0)
+			clk_disable(core.dss2_fck);
+	}
+
+	c = core.dss_54m_fck->usecount;
+	if (c > 0) {
+		DSSERR("warning: dss_54m_fck usecount %d, disabling\n", c);
+		while (c-- > 0)
+			clk_disable(core.dss_54m_fck);
+	}
+
+	if (core.dss_96m_fck) {
+		c = core.dss_96m_fck->usecount;
+		if (c > 0) {
+			DSSERR("warning: dss_96m_fck usecount %d, disabling\n",
+					c);
+			while (c-- > 0)
+				clk_disable(core.dss_96m_fck);
+		}
+	}
+
+	dss_put_clocks();
+
+	dss_uninit_overlays(pdev);
+	dss_uninit_overlay_managers(pdev);
+
+	for (i = 0; i < pdata->num_devices; ++i)
+		omap_dss_unregister_device(pdata->devices[i]);
+
+	return 0;
+}
+
+static void omap_dss_shutdown(struct platform_device *pdev)
+{
+	DSSDBG("shutdown\n");
+	dss_disable_all_devices();
+}
+
+static int omap_dss_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	DSSDBG("suspend %d\n", state.event);
+
+	return dss_suspend_all_devices();
+}
+
+static int omap_dss_resume(struct platform_device *pdev)
+{
+	DSSDBG("resume\n");
+
+	return dss_resume_all_devices();
+}
+
+static struct platform_driver omap_dss_driver = {
+	.probe          = omap_dss_probe,
+	.remove         = omap_dss_remove,
+	.shutdown	= omap_dss_shutdown,
+	.suspend	= omap_dss_suspend,
+	.resume		= omap_dss_resume,
+	.driver         = {
+		.name   = "omapdss",
+		.owner  = THIS_MODULE,
+	},
+};
+
+/* BUS */
+static int dss_bus_match(struct device *dev, struct device_driver *driver)
+{
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+
+	DSSDBG("bus_match. dev %s/%s, drv %s\n",
+			dev_name(dev), dssdev->driver_name, driver->name);
+
+	return strcmp(dssdev->driver_name, driver->name) == 0;
+}
+
+static ssize_t device_name_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			dssdev->name ?
+			dssdev->name : "");
+}
+
+static struct device_attribute default_dev_attrs[] = {
+	__ATTR(name, S_IRUGO, device_name_show, NULL),
+	__ATTR_NULL,
+};
+
+static ssize_t driver_name_show(struct device_driver *drv, char *buf)
+{
+	struct omap_dss_driver *dssdrv = to_dss_driver(drv);
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			dssdrv->driver.name ?
+			dssdrv->driver.name : "");
+}
+static struct driver_attribute default_drv_attrs[] = {
+	__ATTR(name, S_IRUGO, driver_name_show, NULL),
+	__ATTR_NULL,
+};
+
+static struct bus_type dss_bus_type = {
+	.name = "omapdss",
+	.match = dss_bus_match,
+	.dev_attrs = default_dev_attrs,
+	.drv_attrs = default_drv_attrs,
+};
+
+static void dss_bus_release(struct device *dev)
+{
+	DSSDBG("bus_release\n");
+}
+
+static struct device dss_bus = {
+	.release = dss_bus_release,
+};
+
+struct bus_type *dss_get_bus(void)
+{
+	return &dss_bus_type;
+}
+
+/* DRIVER */
+static int dss_driver_probe(struct device *dev)
+{
+	int r;
+	struct omap_dss_driver *dssdrv = to_dss_driver(dev->driver);
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+	struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
+	bool force;
+
+	DSSDBG("driver_probe: dev %s/%s, drv %s\n",
+				dev_name(dev), dssdev->driver_name,
+				dssdrv->driver.name);
+
+	dss_init_device(core.pdev, dssdev);
+
+	/* skip this if the device is behind a ctrl */
+	if (!dssdev->panel.ctrl) {
+		force = pdata->default_device == dssdev;
+		dss_recheck_connections(dssdev, force);
+	}
+
+	r = dssdrv->probe(dssdev);
+
+	if (r) {
+		DSSERR("driver probe failed: %d\n", r);
+		return r;
+	}
+
+	DSSDBG("probe done for device %s\n", dev_name(dev));
+
+	dssdev->driver = dssdrv;
+
+	return 0;
+}
+
+static int dss_driver_remove(struct device *dev)
+{
+	struct omap_dss_driver *dssdrv = to_dss_driver(dev->driver);
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+
+	DSSDBG("driver_remove: dev %s/%s\n", dev_name(dev),
+			dssdev->driver_name);
+
+	dssdrv->remove(dssdev);
+
+	dss_uninit_device(core.pdev, dssdev);
+
+	dssdev->driver = NULL;
+
+	return 0;
+}
+
+int omap_dss_register_driver(struct omap_dss_driver *dssdriver)
+{
+	dssdriver->driver.bus = &dss_bus_type;
+	dssdriver->driver.probe = dss_driver_probe;
+	dssdriver->driver.remove = dss_driver_remove;
+	return driver_register(&dssdriver->driver);
+}
+EXPORT_SYMBOL(omap_dss_register_driver);
+
+void omap_dss_unregister_driver(struct omap_dss_driver *dssdriver)
+{
+	driver_unregister(&dssdriver->driver);
+}
+EXPORT_SYMBOL(omap_dss_unregister_driver);
+
+/* DEVICE */
+static void reset_device(struct device *dev, int check)
+{
+	u8 *dev_p = (u8 *)dev;
+	u8 *dev_end = dev_p + sizeof(*dev);
+	void *saved_pdata;
+
+	saved_pdata = dev->platform_data;
+	if (check) {
+		/*
+		 * Check if there is any other setting than platform_data
+		 * in struct device; warn that these will be reset by our
+		 * init.
+		 */
+		dev->platform_data = NULL;
+		while (dev_p < dev_end) {
+			if (*dev_p) {
+				WARN("%s: struct device fields will be "
+						"discarded\n",
+				     __func__);
+				break;
+			}
+			dev_p++;
+		}
+	}
+	memset(dev, 0, sizeof(*dev));
+	dev->platform_data = saved_pdata;
+}
+
+
+static void omap_dss_dev_release(struct device *dev)
+{
+	reset_device(dev, 0);
+}
+
+int omap_dss_register_device(struct omap_dss_device *dssdev)
+{
+	static int dev_num;
+	static int panel_num;
+	int r;
+
+	WARN_ON(!dssdev->driver_name);
+
+	reset_device(&dssdev->dev, 1);
+	dssdev->dev.bus = &dss_bus_type;
+	dssdev->dev.parent = &dss_bus;
+	dssdev->dev.release = omap_dss_dev_release;
+	dev_set_name(&dssdev->dev, "display%d", dev_num++);
+	r = device_register(&dssdev->dev);
+	if (r)
+		return r;
+
+	if (dssdev->ctrl.panel) {
+		struct omap_dss_device *panel = dssdev->ctrl.panel;
+
+		panel->panel.ctrl = dssdev;
+
+		reset_device(&panel->dev, 1);
+		panel->dev.bus = &dss_bus_type;
+		panel->dev.parent = &dssdev->dev;
+		panel->dev.release = omap_dss_dev_release;
+		dev_set_name(&panel->dev, "panel%d", panel_num++);
+		r = device_register(&panel->dev);
+		if (r)
+			return r;
+	}
+
+	return 0;
+}
+
+void omap_dss_unregister_device(struct omap_dss_device *dssdev)
+{
+	device_unregister(&dssdev->dev);
+
+	if (dssdev->ctrl.panel) {
+		struct omap_dss_device *panel = dssdev->ctrl.panel;
+		device_unregister(&panel->dev);
+	}
+}
+
+/* BUS */
+static int omap_dss_bus_register(void)
+{
+	int r;
+
+	r = bus_register(&dss_bus_type);
+	if (r) {
+		DSSERR("bus register failed\n");
+		return r;
+	}
+
+	dev_set_name(&dss_bus, "omapdss");
+	r = device_register(&dss_bus);
+	if (r) {
+		DSSERR("bus driver register failed\n");
+		bus_unregister(&dss_bus_type);
+		return r;
+	}
+
+	return 0;
+}
+
+/* INIT */
+
+#ifdef CONFIG_OMAP2_DSS_MODULE
+static void omap_dss_bus_unregister(void)
+{
+	device_unregister(&dss_bus);
+
+	bus_unregister(&dss_bus_type);
+}
+
+static int __init omap_dss_init(void)
+{
+	int r;
+
+	r = omap_dss_bus_register();
+	if (r)
+		return r;
+
+	r = platform_driver_register(&omap_dss_driver);
+	if (r) {
+		omap_dss_bus_unregister();
+		return r;
+	}
+
+	return 0;
+}
+
+static void __exit omap_dss_exit(void)
+{
+	platform_driver_unregister(&omap_dss_driver);
+
+	omap_dss_bus_unregister();
+}
+
+module_init(omap_dss_init);
+module_exit(omap_dss_exit);
+#else
+static int __init omap_dss_init(void)
+{
+	return omap_dss_bus_register();
+}
+
+static int __init omap_dss_init2(void)
+{
+	return platform_driver_register(&omap_dss_driver);
+}
+
+core_initcall(omap_dss_init);
+device_initcall(omap_dss_init2);
+#endif
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>");
+MODULE_DESCRIPTION("OMAP2/3 Display Subsystem");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
new file mode 100644
index 0000000..6dabf4b
--- /dev/null
+++ b/drivers/video/omap2/dss/dispc.c
@@ -0,0 +1,3091 @@
+/*
+ * linux/drivers/video/omap2/dss/dispc.c
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "DISPC"
+
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/seq_file.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+
+#include <plat/sram.h>
+#include <plat/clock.h>
+
+#include <plat/display.h>
+
+#include "dss.h"
+
+/* DISPC */
+#define DISPC_BASE			0x48050400
+
+#define DISPC_SZ_REGS			SZ_1K
+
+struct dispc_reg { u16 idx; };
+
+#define DISPC_REG(idx)			((const struct dispc_reg) { idx })
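+/* wrapping the offset in a struct gives register arguments their own type,
+ * so plain integers can't be passed to the register accessors by mistake */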
+
+/* DISPC common */
+#define DISPC_REVISION			DISPC_REG(0x0000)
+#define DISPC_SYSCONFIG			DISPC_REG(0x0010)
+#define DISPC_SYSSTATUS			DISPC_REG(0x0014)
+#define DISPC_IRQSTATUS			DISPC_REG(0x0018)
+#define DISPC_IRQENABLE			DISPC_REG(0x001C)
+#define DISPC_CONTROL			DISPC_REG(0x0040)
+#define DISPC_CONFIG			DISPC_REG(0x0044)
+#define DISPC_CAPABLE			DISPC_REG(0x0048)
+#define DISPC_DEFAULT_COLOR0		DISPC_REG(0x004C)
+#define DISPC_DEFAULT_COLOR1		DISPC_REG(0x0050)
+#define DISPC_TRANS_COLOR0		DISPC_REG(0x0054)
+#define DISPC_TRANS_COLOR1		DISPC_REG(0x0058)
+#define DISPC_LINE_STATUS		DISPC_REG(0x005C)
+#define DISPC_LINE_NUMBER		DISPC_REG(0x0060)
+#define DISPC_TIMING_H			DISPC_REG(0x0064)
+#define DISPC_TIMING_V			DISPC_REG(0x0068)
+#define DISPC_POL_FREQ			DISPC_REG(0x006C)
+#define DISPC_DIVISOR			DISPC_REG(0x0070)
+#define DISPC_GLOBAL_ALPHA		DISPC_REG(0x0074)
+#define DISPC_SIZE_DIG			DISPC_REG(0x0078)
+#define DISPC_SIZE_LCD			DISPC_REG(0x007C)
+
+/* DISPC GFX plane */
+#define DISPC_GFX_BA0			DISPC_REG(0x0080)
+#define DISPC_GFX_BA1			DISPC_REG(0x0084)
+#define DISPC_GFX_POSITION		DISPC_REG(0x0088)
+#define DISPC_GFX_SIZE			DISPC_REG(0x008C)
+#define DISPC_GFX_ATTRIBUTES		DISPC_REG(0x00A0)
+#define DISPC_GFX_FIFO_THRESHOLD	DISPC_REG(0x00A4)
+#define DISPC_GFX_FIFO_SIZE_STATUS	DISPC_REG(0x00A8)
+#define DISPC_GFX_ROW_INC		DISPC_REG(0x00AC)
+#define DISPC_GFX_PIXEL_INC		DISPC_REG(0x00B0)
+#define DISPC_GFX_WINDOW_SKIP		DISPC_REG(0x00B4)
+#define DISPC_GFX_TABLE_BA		DISPC_REG(0x00B8)
+
+#define DISPC_DATA_CYCLE1		DISPC_REG(0x01D4)
+#define DISPC_DATA_CYCLE2		DISPC_REG(0x01D8)
+#define DISPC_DATA_CYCLE3		DISPC_REG(0x01DC)
+
+#define DISPC_CPR_COEF_R		DISPC_REG(0x0220)
+#define DISPC_CPR_COEF_G		DISPC_REG(0x0224)
+#define DISPC_CPR_COEF_B		DISPC_REG(0x0228)
+
+#define DISPC_GFX_PRELOAD		DISPC_REG(0x022C)
+
+/* DISPC Video plane, n = 0 for VID1 and n = 1 for VID2 */
+#define DISPC_VID_REG(n, idx)		DISPC_REG(0x00BC + (n)*0x90 + idx)
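+/* e.g. DISPC_VID_BA0(1) is DISPC_REG(0x00BC + 0x90 + 0x0000) = 0x014C */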
+
+#define DISPC_VID_BA0(n)		DISPC_VID_REG(n, 0x0000)
+#define DISPC_VID_BA1(n)		DISPC_VID_REG(n, 0x0004)
+#define DISPC_VID_POSITION(n)		DISPC_VID_REG(n, 0x0008)
+#define DISPC_VID_SIZE(n)		DISPC_VID_REG(n, 0x000C)
+#define DISPC_VID_ATTRIBUTES(n)		DISPC_VID_REG(n, 0x0010)
+#define DISPC_VID_FIFO_THRESHOLD(n)	DISPC_VID_REG(n, 0x0014)
+#define DISPC_VID_FIFO_SIZE_STATUS(n)	DISPC_VID_REG(n, 0x0018)
+#define DISPC_VID_ROW_INC(n)		DISPC_VID_REG(n, 0x001C)
+#define DISPC_VID_PIXEL_INC(n)		DISPC_VID_REG(n, 0x0020)
+#define DISPC_VID_FIR(n)		DISPC_VID_REG(n, 0x0024)
+#define DISPC_VID_PICTURE_SIZE(n)	DISPC_VID_REG(n, 0x0028)
+#define DISPC_VID_ACCU0(n)		DISPC_VID_REG(n, 0x002C)
+#define DISPC_VID_ACCU1(n)		DISPC_VID_REG(n, 0x0030)
+
+/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
+#define DISPC_VID_FIR_COEF_H(n, i)	DISPC_REG(0x00F0 + (n)*0x90 + (i)*0x8)
+/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
+#define DISPC_VID_FIR_COEF_HV(n, i)	DISPC_REG(0x00F4 + (n)*0x90 + (i)*0x8)
+/* coef index i = {0, 1, 2, 3, 4} */
+#define DISPC_VID_CONV_COEF(n, i)	DISPC_REG(0x0130 + (n)*0x90 + (i)*0x4)
+/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
+#define DISPC_VID_FIR_COEF_V(n, i)	DISPC_REG(0x01E0 + (n)*0x20 + (i)*0x4)
+
+#define DISPC_VID_PRELOAD(n)		DISPC_REG(0x230 + (n)*0x04)
+
+
+#define DISPC_IRQ_MASK_ERROR            (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \
+					 DISPC_IRQ_OCP_ERR | \
+					 DISPC_IRQ_VID1_FIFO_UNDERFLOW | \
+					 DISPC_IRQ_VID2_FIFO_UNDERFLOW | \
+					 DISPC_IRQ_SYNC_LOST | \
+					 DISPC_IRQ_SYNC_LOST_DIGIT)
+
+#define DISPC_MAX_NR_ISRS		8
+
+struct omap_dispc_isr_data {
+	omap_dispc_isr_t	isr;
+	void			*arg;
+	u32			mask;
+};
+
+#define REG_GET(idx, start, end) \
+	FLD_GET(dispc_read_reg(idx), start, end)
+
+#define REG_FLD_MOD(idx, val, start, end)				\
+	dispc_write_reg(idx, FLD_MOD(dispc_read_reg(idx), val, start, end))
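+
+/* e.g. REG_FLD_MOD(DISPC_CONTROL, 1, 5, 5) read-modify-writes DISPC_CONTROL,
+ * setting only bit 5 (GOLCD) */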
+
+static const struct dispc_reg dispc_reg_att[] = { DISPC_GFX_ATTRIBUTES,
+	DISPC_VID_ATTRIBUTES(0),
+	DISPC_VID_ATTRIBUTES(1) };
+
+static struct {
+	void __iomem    *base;
+
+	u32	fifo_size[3];
+
+	spinlock_t irq_lock;
+	u32 irq_error_mask;
+	struct omap_dispc_isr_data registered_isr[DISPC_MAX_NR_ISRS];
+	u32 error_irqs;
+	struct work_struct error_work;
+
+	u32		ctx[DISPC_SZ_REGS / sizeof(u32)];
+} dispc;
+
+static void _omap_dispc_set_irqs(void);
+
+static inline void dispc_write_reg(const struct dispc_reg idx, u32 val)
+{
+	__raw_writel(val, dispc.base + idx.idx);
+}
+
+static inline u32 dispc_read_reg(const struct dispc_reg idx)
+{
+	return __raw_readl(dispc.base + idx.idx);
+}
+
+#define SR(reg) \
+	dispc.ctx[(DISPC_##reg).idx / sizeof(u32)] = dispc_read_reg(DISPC_##reg)
+#define RR(reg) \
+	dispc_write_reg(DISPC_##reg, dispc.ctx[(DISPC_##reg).idx / sizeof(u32)])
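+
+/* e.g. SR(CONTROL) saves DISPC_CONTROL into the ctx slot indexed by the
+ * register offset, and RR(CONTROL) writes the saved value back */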
+
+void dispc_save_context(void)
+{
+	if (cpu_is_omap24xx())
+		return;
+
+	SR(SYSCONFIG);
+	SR(IRQENABLE);
+	SR(CONTROL);
+	SR(CONFIG);
+	SR(DEFAULT_COLOR0);
+	SR(DEFAULT_COLOR1);
+	SR(TRANS_COLOR0);
+	SR(TRANS_COLOR1);
+	SR(LINE_NUMBER);
+	SR(TIMING_H);
+	SR(TIMING_V);
+	SR(POL_FREQ);
+	SR(DIVISOR);
+	SR(GLOBAL_ALPHA);
+	SR(SIZE_DIG);
+	SR(SIZE_LCD);
+
+	SR(GFX_BA0);
+	SR(GFX_BA1);
+	SR(GFX_POSITION);
+	SR(GFX_SIZE);
+	SR(GFX_ATTRIBUTES);
+	SR(GFX_FIFO_THRESHOLD);
+	SR(GFX_ROW_INC);
+	SR(GFX_PIXEL_INC);
+	SR(GFX_WINDOW_SKIP);
+	SR(GFX_TABLE_BA);
+
+	SR(DATA_CYCLE1);
+	SR(DATA_CYCLE2);
+	SR(DATA_CYCLE3);
+
+	SR(CPR_COEF_R);
+	SR(CPR_COEF_G);
+	SR(CPR_COEF_B);
+
+	SR(GFX_PRELOAD);
+
+	/* VID1 */
+	SR(VID_BA0(0));
+	SR(VID_BA1(0));
+	SR(VID_POSITION(0));
+	SR(VID_SIZE(0));
+	SR(VID_ATTRIBUTES(0));
+	SR(VID_FIFO_THRESHOLD(0));
+	SR(VID_ROW_INC(0));
+	SR(VID_PIXEL_INC(0));
+	SR(VID_FIR(0));
+	SR(VID_PICTURE_SIZE(0));
+	SR(VID_ACCU0(0));
+	SR(VID_ACCU1(0));
+
+	SR(VID_FIR_COEF_H(0, 0));
+	SR(VID_FIR_COEF_H(0, 1));
+	SR(VID_FIR_COEF_H(0, 2));
+	SR(VID_FIR_COEF_H(0, 3));
+	SR(VID_FIR_COEF_H(0, 4));
+	SR(VID_FIR_COEF_H(0, 5));
+	SR(VID_FIR_COEF_H(0, 6));
+	SR(VID_FIR_COEF_H(0, 7));
+
+	SR(VID_FIR_COEF_HV(0, 0));
+	SR(VID_FIR_COEF_HV(0, 1));
+	SR(VID_FIR_COEF_HV(0, 2));
+	SR(VID_FIR_COEF_HV(0, 3));
+	SR(VID_FIR_COEF_HV(0, 4));
+	SR(VID_FIR_COEF_HV(0, 5));
+	SR(VID_FIR_COEF_HV(0, 6));
+	SR(VID_FIR_COEF_HV(0, 7));
+
+	SR(VID_CONV_COEF(0, 0));
+	SR(VID_CONV_COEF(0, 1));
+	SR(VID_CONV_COEF(0, 2));
+	SR(VID_CONV_COEF(0, 3));
+	SR(VID_CONV_COEF(0, 4));
+
+	SR(VID_FIR_COEF_V(0, 0));
+	SR(VID_FIR_COEF_V(0, 1));
+	SR(VID_FIR_COEF_V(0, 2));
+	SR(VID_FIR_COEF_V(0, 3));
+	SR(VID_FIR_COEF_V(0, 4));
+	SR(VID_FIR_COEF_V(0, 5));
+	SR(VID_FIR_COEF_V(0, 6));
+	SR(VID_FIR_COEF_V(0, 7));
+
+	SR(VID_PRELOAD(0));
+
+	/* VID2 */
+	SR(VID_BA0(1));
+	SR(VID_BA1(1));
+	SR(VID_POSITION(1));
+	SR(VID_SIZE(1));
+	SR(VID_ATTRIBUTES(1));
+	SR(VID_FIFO_THRESHOLD(1));
+	SR(VID_ROW_INC(1));
+	SR(VID_PIXEL_INC(1));
+	SR(VID_FIR(1));
+	SR(VID_PICTURE_SIZE(1));
+	SR(VID_ACCU0(1));
+	SR(VID_ACCU1(1));
+
+	SR(VID_FIR_COEF_H(1, 0));
+	SR(VID_FIR_COEF_H(1, 1));
+	SR(VID_FIR_COEF_H(1, 2));
+	SR(VID_FIR_COEF_H(1, 3));
+	SR(VID_FIR_COEF_H(1, 4));
+	SR(VID_FIR_COEF_H(1, 5));
+	SR(VID_FIR_COEF_H(1, 6));
+	SR(VID_FIR_COEF_H(1, 7));
+
+	SR(VID_FIR_COEF_HV(1, 0));
+	SR(VID_FIR_COEF_HV(1, 1));
+	SR(VID_FIR_COEF_HV(1, 2));
+	SR(VID_FIR_COEF_HV(1, 3));
+	SR(VID_FIR_COEF_HV(1, 4));
+	SR(VID_FIR_COEF_HV(1, 5));
+	SR(VID_FIR_COEF_HV(1, 6));
+	SR(VID_FIR_COEF_HV(1, 7));
+
+	SR(VID_CONV_COEF(1, 0));
+	SR(VID_CONV_COEF(1, 1));
+	SR(VID_CONV_COEF(1, 2));
+	SR(VID_CONV_COEF(1, 3));
+	SR(VID_CONV_COEF(1, 4));
+
+	SR(VID_FIR_COEF_V(1, 0));
+	SR(VID_FIR_COEF_V(1, 1));
+	SR(VID_FIR_COEF_V(1, 2));
+	SR(VID_FIR_COEF_V(1, 3));
+	SR(VID_FIR_COEF_V(1, 4));
+	SR(VID_FIR_COEF_V(1, 5));
+	SR(VID_FIR_COEF_V(1, 6));
+	SR(VID_FIR_COEF_V(1, 7));
+
+	SR(VID_PRELOAD(1));
+}
+
+void dispc_restore_context(void)
+{
+	RR(SYSCONFIG);
+	RR(IRQENABLE);
+	/*RR(CONTROL);*/
+	RR(CONFIG);
+	RR(DEFAULT_COLOR0);
+	RR(DEFAULT_COLOR1);
+	RR(TRANS_COLOR0);
+	RR(TRANS_COLOR1);
+	RR(LINE_NUMBER);
+	RR(TIMING_H);
+	RR(TIMING_V);
+	RR(POL_FREQ);
+	RR(DIVISOR);
+	RR(GLOBAL_ALPHA);
+	RR(SIZE_DIG);
+	RR(SIZE_LCD);
+
+	RR(GFX_BA0);
+	RR(GFX_BA1);
+	RR(GFX_POSITION);
+	RR(GFX_SIZE);
+	RR(GFX_ATTRIBUTES);
+	RR(GFX_FIFO_THRESHOLD);
+	RR(GFX_ROW_INC);
+	RR(GFX_PIXEL_INC);
+	RR(GFX_WINDOW_SKIP);
+	RR(GFX_TABLE_BA);
+
+	RR(DATA_CYCLE1);
+	RR(DATA_CYCLE2);
+	RR(DATA_CYCLE3);
+
+	RR(CPR_COEF_R);
+	RR(CPR_COEF_G);
+	RR(CPR_COEF_B);
+
+	RR(GFX_PRELOAD);
+
+	/* VID1 */
+	RR(VID_BA0(0));
+	RR(VID_BA1(0));
+	RR(VID_POSITION(0));
+	RR(VID_SIZE(0));
+	RR(VID_ATTRIBUTES(0));
+	RR(VID_FIFO_THRESHOLD(0));
+	RR(VID_ROW_INC(0));
+	RR(VID_PIXEL_INC(0));
+	RR(VID_FIR(0));
+	RR(VID_PICTURE_SIZE(0));
+	RR(VID_ACCU0(0));
+	RR(VID_ACCU1(0));
+
+	RR(VID_FIR_COEF_H(0, 0));
+	RR(VID_FIR_COEF_H(0, 1));
+	RR(VID_FIR_COEF_H(0, 2));
+	RR(VID_FIR_COEF_H(0, 3));
+	RR(VID_FIR_COEF_H(0, 4));
+	RR(VID_FIR_COEF_H(0, 5));
+	RR(VID_FIR_COEF_H(0, 6));
+	RR(VID_FIR_COEF_H(0, 7));
+
+	RR(VID_FIR_COEF_HV(0, 0));
+	RR(VID_FIR_COEF_HV(0, 1));
+	RR(VID_FIR_COEF_HV(0, 2));
+	RR(VID_FIR_COEF_HV(0, 3));
+	RR(VID_FIR_COEF_HV(0, 4));
+	RR(VID_FIR_COEF_HV(0, 5));
+	RR(VID_FIR_COEF_HV(0, 6));
+	RR(VID_FIR_COEF_HV(0, 7));
+
+	RR(VID_CONV_COEF(0, 0));
+	RR(VID_CONV_COEF(0, 1));
+	RR(VID_CONV_COEF(0, 2));
+	RR(VID_CONV_COEF(0, 3));
+	RR(VID_CONV_COEF(0, 4));
+
+	RR(VID_FIR_COEF_V(0, 0));
+	RR(VID_FIR_COEF_V(0, 1));
+	RR(VID_FIR_COEF_V(0, 2));
+	RR(VID_FIR_COEF_V(0, 3));
+	RR(VID_FIR_COEF_V(0, 4));
+	RR(VID_FIR_COEF_V(0, 5));
+	RR(VID_FIR_COEF_V(0, 6));
+	RR(VID_FIR_COEF_V(0, 7));
+
+	RR(VID_PRELOAD(0));
+
+	/* VID2 */
+	RR(VID_BA0(1));
+	RR(VID_BA1(1));
+	RR(VID_POSITION(1));
+	RR(VID_SIZE(1));
+	RR(VID_ATTRIBUTES(1));
+	RR(VID_FIFO_THRESHOLD(1));
+	RR(VID_ROW_INC(1));
+	RR(VID_PIXEL_INC(1));
+	RR(VID_FIR(1));
+	RR(VID_PICTURE_SIZE(1));
+	RR(VID_ACCU0(1));
+	RR(VID_ACCU1(1));
+
+	RR(VID_FIR_COEF_H(1, 0));
+	RR(VID_FIR_COEF_H(1, 1));
+	RR(VID_FIR_COEF_H(1, 2));
+	RR(VID_FIR_COEF_H(1, 3));
+	RR(VID_FIR_COEF_H(1, 4));
+	RR(VID_FIR_COEF_H(1, 5));
+	RR(VID_FIR_COEF_H(1, 6));
+	RR(VID_FIR_COEF_H(1, 7));
+
+	RR(VID_FIR_COEF_HV(1, 0));
+	RR(VID_FIR_COEF_HV(1, 1));
+	RR(VID_FIR_COEF_HV(1, 2));
+	RR(VID_FIR_COEF_HV(1, 3));
+	RR(VID_FIR_COEF_HV(1, 4));
+	RR(VID_FIR_COEF_HV(1, 5));
+	RR(VID_FIR_COEF_HV(1, 6));
+	RR(VID_FIR_COEF_HV(1, 7));
+
+	RR(VID_CONV_COEF(1, 0));
+	RR(VID_CONV_COEF(1, 1));
+	RR(VID_CONV_COEF(1, 2));
+	RR(VID_CONV_COEF(1, 3));
+	RR(VID_CONV_COEF(1, 4));
+
+	RR(VID_FIR_COEF_V(1, 0));
+	RR(VID_FIR_COEF_V(1, 1));
+	RR(VID_FIR_COEF_V(1, 2));
+	RR(VID_FIR_COEF_V(1, 3));
+	RR(VID_FIR_COEF_V(1, 4));
+	RR(VID_FIR_COEF_V(1, 5));
+	RR(VID_FIR_COEF_V(1, 6));
+	RR(VID_FIR_COEF_V(1, 7));
+
+	RR(VID_PRELOAD(1));
+
+	/* enable last, because LCD & DIGIT enable are here */
+	RR(CONTROL);
+}
+
+#undef SR
+#undef RR
+
+static inline void enable_clocks(bool enable)
+{
+	if (enable)
+		dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+	else
+		dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+}
+
+bool dispc_go_busy(enum omap_channel channel)
+{
+	int bit;
+
+	if (channel == OMAP_DSS_CHANNEL_LCD)
+		bit = 5; /* GOLCD */
+	else
+		bit = 6; /* GODIGIT */
+
+	return REG_GET(DISPC_CONTROL, bit, bit) == 1;
+}
+
+void dispc_go(enum omap_channel channel)
+{
+	int bit;
+
+	enable_clocks(1);
+
+	if (channel == OMAP_DSS_CHANNEL_LCD)
+		bit = 0; /* LCDENABLE */
+	else
+		bit = 1; /* DIGITALENABLE */
+
+	/* if the channel is not enabled, we don't need GO */
+	if (REG_GET(DISPC_CONTROL, bit, bit) == 0)
+		goto end;
+
+	if (channel == OMAP_DSS_CHANNEL_LCD)
+		bit = 5; /* GOLCD */
+	else
+		bit = 6; /* GODIGIT */
+
+	if (REG_GET(DISPC_CONTROL, bit, bit) == 1) {
+		DSSERR("GO bit not down for channel %d\n", channel);
+		goto end;
+	}
+
+	DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" : "DIGIT");
+
+	REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit);
+end:
+	enable_clocks(0);
+}
+
+static void _dispc_write_firh_reg(enum omap_plane plane, int reg, u32 value)
+{
+	BUG_ON(plane == OMAP_DSS_GFX);
+
+	dispc_write_reg(DISPC_VID_FIR_COEF_H(plane-1, reg), value);
+}
+
+static void _dispc_write_firhv_reg(enum omap_plane plane, int reg, u32 value)
+{
+	BUG_ON(plane == OMAP_DSS_GFX);
+
+	dispc_write_reg(DISPC_VID_FIR_COEF_HV(plane-1, reg), value);
+}
+
+static void _dispc_write_firv_reg(enum omap_plane plane, int reg, u32 value)
+{
+	BUG_ON(plane == OMAP_DSS_GFX);
+
+	dispc_write_reg(DISPC_VID_FIR_COEF_V(plane-1, reg), value);
+}
+
+static void _dispc_set_scale_coef(enum omap_plane plane, int hscaleup,
+		int vscaleup, int five_taps)
+{
+	/* Coefficients for horizontal up-sampling */
+	static const u32 coef_hup[8] = {
+		0x00800000,
+		0x0D7CF800,
+		0x1E70F5FF,
+		0x335FF5FE,
+		0xF74949F7,
+		0xF55F33FB,
+		0xF5701EFE,
+		0xF87C0DFF,
+	};
+
+	/* Coefficients for horizontal down-sampling */
+	static const u32 coef_hdown[8] = {
+		0x24382400,
+		0x28371FFE,
+		0x2C361BFB,
+		0x303516F9,
+		0x11343311,
+		0x1635300C,
+		0x1B362C08,
+		0x1F372804,
+	};
+
+	/* Coefficients for horizontal and vertical up-sampling */
+	static const u32 coef_hvup[2][8] = {
+		{
+		0x00800000,
+		0x037B02FF,
+		0x0C6F05FE,
+		0x205907FB,
+		0x00404000,
+		0x075920FE,
+		0x056F0CFF,
+		0x027B0300,
+		},
+		{
+		0x00800000,
+		0x0D7CF8FF,
+		0x1E70F5FE,
+		0x335FF5FB,
+		0xF7404000,
+		0xF55F33FE,
+		0xF5701EFF,
+		0xF87C0D00,
+		},
+	};
+
+	/* Coefficients for horizontal and vertical down-sampling */
+	static const u32 coef_hvdown[2][8] = {
+		{
+		0x24382400,
+		0x28391F04,
+		0x2D381B08,
+		0x3237170C,
+		0x123737F7,
+		0x173732F9,
+		0x1B382DFB,
+		0x1F3928FE,
+		},
+		{
+		0x24382400,
+		0x28371F04,
+		0x2C361B08,
+		0x3035160C,
+		0x113433F7,
+		0x163530F9,
+		0x1B362CFB,
+		0x1F3728FE,
+		},
+	};
+
+	/* Coefficients for vertical up-sampling */
+	static const u32 coef_vup[8] = {
+		0x00000000,
+		0x0000FF00,
+		0x0000FEFF,
+		0x0000FBFE,
+		0x000000F7,
+		0x0000FEFB,
+		0x0000FFFE,
+		0x000000FF,
+	};
+
+
+	/* Coefficients for vertical down-sampling */
+	static const u32 coef_vdown[8] = {
+		0x00000000,
+		0x000004FE,
+		0x000008FB,
+		0x00000CF9,
+		0x0000F711,
+		0x0000F90C,
+		0x0000FB08,
+		0x0000FE04,
+	};
+
+	const u32 *h_coef;
+	const u32 *hv_coef;
+	const u32 *hv_coef_mod;
+	const u32 *v_coef;
+	int i;
+
+	if (hscaleup)
+		h_coef = coef_hup;
+	else
+		h_coef = coef_hdown;
+
+	if (vscaleup) {
+		hv_coef = coef_hvup[five_taps];
+		v_coef = coef_vup;
+
+		if (hscaleup)
+			hv_coef_mod = NULL;
+		else
+			hv_coef_mod = coef_hvdown[five_taps];
+	} else {
+		hv_coef = coef_hvdown[five_taps];
+		v_coef = coef_vdown;
+
+		if (hscaleup)
+			hv_coef_mod = coef_hvup[five_taps];
+		else
+			hv_coef_mod = NULL;
+	}
+
+	for (i = 0; i < 8; i++) {
+		u32 h, hv;
+
+		h = h_coef[i];
+
+		hv = hv_coef[i];
+
+		if (hv_coef_mod) {
+			hv &= 0xffffff00;
+			hv |= (hv_coef_mod[i] & 0xff);
+		}
+
+		_dispc_write_firh_reg(plane, i, h);
+		_dispc_write_firhv_reg(plane, i, hv);
+	}
+
+	if (!five_taps)
+		return;
+
+	for (i = 0; i < 8; i++) {
+		u32 v;
+		v = v_coef[i];
+		_dispc_write_firv_reg(plane, i, v);
+	}
+}
+
+static void _dispc_setup_color_conv_coef(void)
+{
+	const struct color_conv_coef {
+		int  ry,  rcr,  rcb,   gy,  gcr,  gcb,   by,  bcr,  bcb;
+		int  full_range;
+	}  ctbl_bt601_5 = {
+		298,  409,    0,  298, -208, -100,  298,    0,  517, 0,
+	};
+
+	const struct color_conv_coef *ct;
+
+#define CVAL(x, y) (FLD_VAL(x, 26, 16) | FLD_VAL(y, 10, 0))
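+/* CVAL() packs two 11-bit colour conversion coefficients into one word */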
+
+	ct = &ctbl_bt601_5;
+
+	dispc_write_reg(DISPC_VID_CONV_COEF(0, 0), CVAL(ct->rcr, ct->ry));
+	dispc_write_reg(DISPC_VID_CONV_COEF(0, 1), CVAL(ct->gy,	 ct->rcb));
+	dispc_write_reg(DISPC_VID_CONV_COEF(0, 2), CVAL(ct->gcb, ct->gcr));
+	dispc_write_reg(DISPC_VID_CONV_COEF(0, 3), CVAL(ct->bcr, ct->by));
+	dispc_write_reg(DISPC_VID_CONV_COEF(0, 4), CVAL(0,       ct->bcb));
+
+	dispc_write_reg(DISPC_VID_CONV_COEF(1, 0), CVAL(ct->rcr, ct->ry));
+	dispc_write_reg(DISPC_VID_CONV_COEF(1, 1), CVAL(ct->gy,	 ct->rcb));
+	dispc_write_reg(DISPC_VID_CONV_COEF(1, 2), CVAL(ct->gcb, ct->gcr));
+	dispc_write_reg(DISPC_VID_CONV_COEF(1, 3), CVAL(ct->bcr, ct->by));
+	dispc_write_reg(DISPC_VID_CONV_COEF(1, 4), CVAL(0,       ct->bcb));
+
+#undef CVAL
+
+	REG_FLD_MOD(DISPC_VID_ATTRIBUTES(0), ct->full_range, 11, 11);
+	REG_FLD_MOD(DISPC_VID_ATTRIBUTES(1), ct->full_range, 11, 11);
+}
+
+
+static void _dispc_set_plane_ba0(enum omap_plane plane, u32 paddr)
+{
+	const struct dispc_reg ba0_reg[] = { DISPC_GFX_BA0,
+		DISPC_VID_BA0(0),
+		DISPC_VID_BA0(1) };
+
+	dispc_write_reg(ba0_reg[plane], paddr);
+}
+
+static void _dispc_set_plane_ba1(enum omap_plane plane, u32 paddr)
+{
+	const struct dispc_reg ba1_reg[] = { DISPC_GFX_BA1,
+				      DISPC_VID_BA1(0),
+				      DISPC_VID_BA1(1) };
+
+	dispc_write_reg(ba1_reg[plane], paddr);
+}
+
+static void _dispc_set_plane_pos(enum omap_plane plane, int x, int y)
+{
+	const struct dispc_reg pos_reg[] = { DISPC_GFX_POSITION,
+				      DISPC_VID_POSITION(0),
+				      DISPC_VID_POSITION(1) };
+
+	u32 val = FLD_VAL(y, 26, 16) | FLD_VAL(x, 10, 0);
+	dispc_write_reg(pos_reg[plane], val);
+}
+
+static void _dispc_set_pic_size(enum omap_plane plane, int width, int height)
+{
+	const struct dispc_reg siz_reg[] = { DISPC_GFX_SIZE,
+				      DISPC_VID_PICTURE_SIZE(0),
+				      DISPC_VID_PICTURE_SIZE(1) };
+	u32 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
+	dispc_write_reg(siz_reg[plane], val);
+}
+
+static void _dispc_set_vid_size(enum omap_plane plane, int width, int height)
+{
+	u32 val;
+	const struct dispc_reg vsi_reg[] = { DISPC_VID_SIZE(0),
+				      DISPC_VID_SIZE(1) };
+
+	BUG_ON(plane == OMAP_DSS_GFX);
+
+	val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
+	dispc_write_reg(vsi_reg[plane-1], val);
+}
+
+static void _dispc_setup_global_alpha(enum omap_plane plane, u8 global_alpha)
+{
+	BUG_ON(plane == OMAP_DSS_VIDEO1);
+
+	if (cpu_is_omap24xx())
+		return;
+
+	if (plane == OMAP_DSS_GFX)
+		REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, 7, 0);
+	else if (plane == OMAP_DSS_VIDEO2)
+		REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, 23, 16);
+}
+
+static void _dispc_set_pix_inc(enum omap_plane plane, s32 inc)
+{
+	const struct dispc_reg ri_reg[] = { DISPC_GFX_PIXEL_INC,
+				     DISPC_VID_PIXEL_INC(0),
+				     DISPC_VID_PIXEL_INC(1) };
+
+	dispc_write_reg(ri_reg[plane], inc);
+}
+
+static void _dispc_set_row_inc(enum omap_plane plane, s32 inc)
+{
+	const struct dispc_reg ri_reg[] = { DISPC_GFX_ROW_INC,
+				     DISPC_VID_ROW_INC(0),
+				     DISPC_VID_ROW_INC(1) };
+
+	dispc_write_reg(ri_reg[plane], inc);
+}
+
+static void _dispc_set_color_mode(enum omap_plane plane,
+		enum omap_color_mode color_mode)
+{
+	u32 m = 0;
+
+	switch (color_mode) {
+	case OMAP_DSS_COLOR_CLUT1:
+		m = 0x0; break;
+	case OMAP_DSS_COLOR_CLUT2:
+		m = 0x1; break;
+	case OMAP_DSS_COLOR_CLUT4:
+		m = 0x2; break;
+	case OMAP_DSS_COLOR_CLUT8:
+		m = 0x3; break;
+	case OMAP_DSS_COLOR_RGB12U:
+		m = 0x4; break;
+	case OMAP_DSS_COLOR_ARGB16:
+		m = 0x5; break;
+	case OMAP_DSS_COLOR_RGB16:
+		m = 0x6; break;
+	case OMAP_DSS_COLOR_RGB24U:
+		m = 0x8; break;
+	case OMAP_DSS_COLOR_RGB24P:
+		m = 0x9; break;
+	case OMAP_DSS_COLOR_YUV2:
+		m = 0xa; break;
+	case OMAP_DSS_COLOR_UYVY:
+		m = 0xb; break;
+	case OMAP_DSS_COLOR_ARGB32:
+		m = 0xc; break;
+	case OMAP_DSS_COLOR_RGBA32:
+		m = 0xd; break;
+	case OMAP_DSS_COLOR_RGBX32:
+		m = 0xe; break;
+	default:
+		BUG(); break;
+	}
+
+	REG_FLD_MOD(dispc_reg_att[plane], m, 4, 1);
+}
+
+static void _dispc_set_channel_out(enum omap_plane plane,
+		enum omap_channel channel)
+{
+	int shift;
+	u32 val;
+
+	switch (plane) {
+	case OMAP_DSS_GFX:
+		shift = 8;
+		break;
+	case OMAP_DSS_VIDEO1:
+	case OMAP_DSS_VIDEO2:
+		shift = 16;
+		break;
+	default:
+		BUG();
+		return;
+	}
+
+	val = dispc_read_reg(dispc_reg_att[plane]);
+	val = FLD_MOD(val, channel, shift, shift);
+	dispc_write_reg(dispc_reg_att[plane], val);
+}
+
+void dispc_set_burst_size(enum omap_plane plane,
+		enum omap_burst_size burst_size)
+{
+	int shift;
+	u32 val;
+
+	enable_clocks(1);
+
+	switch (plane) {
+	case OMAP_DSS_GFX:
+		shift = 6;
+		break;
+	case OMAP_DSS_VIDEO1:
+	case OMAP_DSS_VIDEO2:
+		shift = 14;
+		break;
+	default:
+		BUG();
+		return;
+	}
+
+	val = dispc_read_reg(dispc_reg_att[plane]);
+	val = FLD_MOD(val, burst_size, shift+1, shift);
+	dispc_write_reg(dispc_reg_att[plane], val);
+
+	enable_clocks(0);
+}
+
+static void _dispc_set_vid_color_conv(enum omap_plane plane, bool enable)
+{
+	u32 val;
+
+	BUG_ON(plane == OMAP_DSS_GFX);
+
+	val = dispc_read_reg(dispc_reg_att[plane]);
+	val = FLD_MOD(val, enable, 9, 9);
+	dispc_write_reg(dispc_reg_att[plane], val);
+}
+
+void dispc_enable_replication(enum omap_plane plane, bool enable)
+{
+	int bit;
+
+	if (plane == OMAP_DSS_GFX)
+		bit = 5;
+	else
+		bit = 10;
+
+	enable_clocks(1);
+	REG_FLD_MOD(dispc_reg_att[plane], enable, bit, bit);
+	enable_clocks(0);
+}
+
+void dispc_set_lcd_size(u16 width, u16 height)
+{
+	u32 val;
+	BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
+	val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
+	enable_clocks(1);
+	dispc_write_reg(DISPC_SIZE_LCD, val);
+	enable_clocks(0);
+}
+
+void dispc_set_digit_size(u16 width, u16 height)
+{
+	u32 val;
+	BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
+	val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
+	enable_clocks(1);
+	dispc_write_reg(DISPC_SIZE_DIG, val);
+	enable_clocks(0);
+}
+
+static void dispc_read_plane_fifo_sizes(void)
+{
+	const struct dispc_reg fsz_reg[] = { DISPC_GFX_FIFO_SIZE_STATUS,
+				      DISPC_VID_FIFO_SIZE_STATUS(0),
+				      DISPC_VID_FIFO_SIZE_STATUS(1) };
+	u32 size;
+	int plane;
+
+	enable_clocks(1);
+
+	for (plane = 0; plane < ARRAY_SIZE(dispc.fifo_size); ++plane) {
+		if (cpu_is_omap24xx())
+			size = FLD_GET(dispc_read_reg(fsz_reg[plane]), 8, 0);
+		else if (cpu_is_omap34xx())
+			size = FLD_GET(dispc_read_reg(fsz_reg[plane]), 10, 0);
+		else
+			BUG();
+
+		dispc.fifo_size[plane] = size;
+	}
+
+	enable_clocks(0);
+}
+
+u32 dispc_get_plane_fifo_size(enum omap_plane plane)
+{
+	return dispc.fifo_size[plane];
+}
+
+void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high)
+{
+	const struct dispc_reg ftrs_reg[] = { DISPC_GFX_FIFO_THRESHOLD,
+				       DISPC_VID_FIFO_THRESHOLD(0),
+				       DISPC_VID_FIFO_THRESHOLD(1) };
+	enable_clocks(1);
+
+	DSSDBG("fifo(%d) low/high old %u/%u, new %u/%u\n",
+			plane,
+			REG_GET(ftrs_reg[plane], 11, 0),
+			REG_GET(ftrs_reg[plane], 27, 16),
+			low, high);
+
+	if (cpu_is_omap24xx())
+		dispc_write_reg(ftrs_reg[plane],
+				FLD_VAL(high, 24, 16) | FLD_VAL(low, 8, 0));
+	else
+		dispc_write_reg(ftrs_reg[plane],
+				FLD_VAL(high, 27, 16) | FLD_VAL(low, 11, 0));
+
+	enable_clocks(0);
+}
+
+void dispc_enable_fifomerge(bool enable)
+{
+	enable_clocks(1);
+
+	DSSDBG("FIFO merge %s\n", enable ? "enabled" : "disabled");
+	REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 14, 14);
+
+	enable_clocks(0);
+}
+
+static void _dispc_set_fir(enum omap_plane plane, int hinc, int vinc)
+{
+	u32 val;
+	const struct dispc_reg fir_reg[] = { DISPC_VID_FIR(0),
+				      DISPC_VID_FIR(1) };
+
+	BUG_ON(plane == OMAP_DSS_GFX);
+
+	if (cpu_is_omap24xx())
+		val = FLD_VAL(vinc, 27, 16) | FLD_VAL(hinc, 11, 0);
+	else
+		val = FLD_VAL(vinc, 28, 16) | FLD_VAL(hinc, 12, 0);
+	dispc_write_reg(fir_reg[plane-1], val);
+}
+
+static void _dispc_set_vid_accu0(enum omap_plane plane, int haccu, int vaccu)
+{
+	u32 val;
+	const struct dispc_reg ac0_reg[] = { DISPC_VID_ACCU0(0),
+				      DISPC_VID_ACCU0(1) };
+
+	BUG_ON(plane == OMAP_DSS_GFX);
+
+	val = FLD_VAL(vaccu, 25, 16) | FLD_VAL(haccu, 9, 0);
+	dispc_write_reg(ac0_reg[plane-1], val);
+}
+
+static void _dispc_set_vid_accu1(enum omap_plane plane, int haccu, int vaccu)
+{
+	u32 val;
+	const struct dispc_reg ac1_reg[] = { DISPC_VID_ACCU1(0),
+				      DISPC_VID_ACCU1(1) };
+
+	BUG_ON(plane == OMAP_DSS_GFX);
+
+	val = FLD_VAL(vaccu, 25, 16) | FLD_VAL(haccu, 9, 0);
+	dispc_write_reg(ac1_reg[plane-1], val);
+}
+
+
+static void _dispc_set_scaling(enum omap_plane plane,
+		u16 orig_width, u16 orig_height,
+		u16 out_width, u16 out_height,
+		bool ilace, bool five_taps,
+		bool fieldmode)
+{
+	int fir_hinc;
+	int fir_vinc;
+	int hscaleup, vscaleup;
+	int accu0 = 0;
+	int accu1 = 0;
+	u32 l;
+
+	BUG_ON(plane == OMAP_DSS_GFX);
+
+	hscaleup = orig_width <= out_width;
+	vscaleup = orig_height <= out_height;
+
+	_dispc_set_scale_coef(plane, hscaleup, vscaleup, five_taps);
+
+	if (!orig_width || orig_width == out_width)
+		fir_hinc = 0;
+	else
+		fir_hinc = 1024 * orig_width / out_width;
+
+	if (!orig_height || orig_height == out_height)
+		fir_vinc = 0;
+	else
+		fir_vinc = 1024 * orig_height / out_height;
+
+	_dispc_set_fir(plane, fir_hinc, fir_vinc);
+
+	l = dispc_read_reg(dispc_reg_att[plane]);
+	l &= ~((0x0f << 5) | (0x3 << 21));
+
+	l |= fir_hinc ? (1 << 5) : 0;
+	l |= fir_vinc ? (1 << 6) : 0;
+
+	l |= hscaleup ? 0 : (1 << 7);
+	l |= vscaleup ? 0 : (1 << 8);
+
+	l |= five_taps ? (1 << 21) : 0;
+	l |= five_taps ? (1 << 22) : 0;
+
+	dispc_write_reg(dispc_reg_att[plane], l);
+
+	/*
+	 * field 0 = even field = bottom field
+	 * field 1 = odd field = top field
+	 */
+	if (ilace && !fieldmode) {
+		accu1 = 0;
+		accu0 = (fir_vinc / 2) & 0x3ff;
+		if (accu0 >= 1024/2) {
+			accu1 = 1024/2;
+			accu0 -= accu1;
+		}
+	}
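+
+	/* e.g. with fir_vinc = 1536 (1.5x vertical downscale), accu0 starts
+	 * at 768; since 768 >= 512, accu1 = 512 and accu0 becomes 256.
+	 */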
+
+	_dispc_set_vid_accu0(plane, 0, accu0);
+	_dispc_set_vid_accu1(plane, 0, accu1);
+}
+
+static void _dispc_set_rotation_attrs(enum omap_plane plane, u8 rotation,
+		bool mirroring, enum omap_color_mode color_mode)
+{
+	if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+			color_mode == OMAP_DSS_COLOR_UYVY) {
+		int vidrot = 0;
+
+		if (mirroring) {
+			switch (rotation) {
+			case OMAP_DSS_ROT_0:
+				vidrot = 2;
+				break;
+			case OMAP_DSS_ROT_90:
+				vidrot = 1;
+				break;
+			case OMAP_DSS_ROT_180:
+				vidrot = 0;
+				break;
+			case OMAP_DSS_ROT_270:
+				vidrot = 3;
+				break;
+			}
+		} else {
+			switch (rotation) {
+			case OMAP_DSS_ROT_0:
+				vidrot = 0;
+				break;
+			case OMAP_DSS_ROT_90:
+				vidrot = 1;
+				break;
+			case OMAP_DSS_ROT_180:
+				vidrot = 2;
+				break;
+			case OMAP_DSS_ROT_270:
+				vidrot = 3;
+				break;
+			}
+		}
+
+		REG_FLD_MOD(dispc_reg_att[plane], vidrot, 13, 12);
+
+		if (rotation == OMAP_DSS_ROT_90 || rotation == OMAP_DSS_ROT_270)
+			REG_FLD_MOD(dispc_reg_att[plane], 0x1, 18, 18);
+		else
+			REG_FLD_MOD(dispc_reg_att[plane], 0x0, 18, 18);
+	} else {
+		REG_FLD_MOD(dispc_reg_att[plane], 0, 13, 12);
+		REG_FLD_MOD(dispc_reg_att[plane], 0, 18, 18);
+	}
+}
+
+static int color_mode_to_bpp(enum omap_color_mode color_mode)
+{
+	switch (color_mode) {
+	case OMAP_DSS_COLOR_CLUT1:
+		return 1;
+	case OMAP_DSS_COLOR_CLUT2:
+		return 2;
+	case OMAP_DSS_COLOR_CLUT4:
+		return 4;
+	case OMAP_DSS_COLOR_CLUT8:
+		return 8;
+	case OMAP_DSS_COLOR_RGB12U:
+	case OMAP_DSS_COLOR_RGB16:
+	case OMAP_DSS_COLOR_ARGB16:
+	case OMAP_DSS_COLOR_YUV2:
+	case OMAP_DSS_COLOR_UYVY:
+		return 16;
+	case OMAP_DSS_COLOR_RGB24P:
+		return 24;
+	case OMAP_DSS_COLOR_RGB24U:
+	case OMAP_DSS_COLOR_ARGB32:
+	case OMAP_DSS_COLOR_RGBA32:
+	case OMAP_DSS_COLOR_RGBX32:
+		return 32;
+	default:
+		BUG();
+	}
+}
+
+static s32 pixinc(int pixels, u8 ps)
+{
+	if (pixels == 1)
+		return 1;
+	else if (pixels > 1)
+		return 1 + (pixels - 1) * ps;
+	else if (pixels < 0)
+		return 1 - (-pixels + 1) * ps;
+	else
+		BUG();
+}
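+
+/* e.g. with ps = 2 (16 bits per pixel): pixinc(1, 2) = 1 steps to the next
+ * contiguous pixel, pixinc(2, 2) = 3 advances two pixels (skipping one), and
+ * pixinc(-1, 2) = -3 steps one pixel backwards.
+ */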
+
+static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
+		u16 screen_width,
+		u16 width, u16 height,
+		enum omap_color_mode color_mode, bool fieldmode,
+		unsigned int field_offset,
+		unsigned *offset0, unsigned *offset1,
+		s32 *row_inc, s32 *pix_inc)
+{
+	u8 ps;
+
+	/* FIXME CLUT formats */
+	switch (color_mode) {
+	case OMAP_DSS_COLOR_CLUT1:
+	case OMAP_DSS_COLOR_CLUT2:
+	case OMAP_DSS_COLOR_CLUT4:
+	case OMAP_DSS_COLOR_CLUT8:
+		BUG();
+		return;
+	case OMAP_DSS_COLOR_YUV2:
+	case OMAP_DSS_COLOR_UYVY:
+		ps = 4;
+		break;
+	default:
+		ps = color_mode_to_bpp(color_mode) / 8;
+		break;
+	}
+
+	DSSDBG("calc_rot(%d): scrw %d, %dx%d\n", rotation, screen_width,
+			width, height);
+
+	/*
+	 * field 0 = even field = bottom field
+	 * field 1 = odd field = top field
+	 */
+	switch (rotation + mirror * 4) {
+	case OMAP_DSS_ROT_0:
+	case OMAP_DSS_ROT_180:
+		/*
+		 * If the pixel format is YUV or UYVY divide the width
+		 * of the image by 2 for 0 and 180 degree rotation.
+		 */
+		if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+			color_mode == OMAP_DSS_COLOR_UYVY)
+			width = width >> 1;
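+		/* fall through */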
+	case OMAP_DSS_ROT_90:
+	case OMAP_DSS_ROT_270:
+		*offset1 = 0;
+		if (field_offset)
+			*offset0 = field_offset * screen_width * ps;
+		else
+			*offset0 = 0;
+
+		*row_inc = pixinc(1 + (screen_width - width) +
+				(fieldmode ? screen_width : 0),
+				ps);
+		*pix_inc = pixinc(1, ps);
+		break;
+
+	case OMAP_DSS_ROT_0 + 4:
+	case OMAP_DSS_ROT_180 + 4:
+		/*
+		 * If the pixel format is YUV or UYVY divide the width
+		 * of the image by 2 for 0 and 180 degree rotation.
+		 */
+		if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+			color_mode == OMAP_DSS_COLOR_UYVY)
+			width = width >> 1;
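+		/* fall through */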
+	case OMAP_DSS_ROT_90 + 4:
+	case OMAP_DSS_ROT_270 + 4:
+		*offset1 = 0;
+		if (field_offset)
+			*offset0 = field_offset * screen_width * ps;
+		else
+			*offset0 = 0;
+		*row_inc = pixinc(1 - (screen_width + width) -
+				(fieldmode ? screen_width : 0),
+				ps);
+		*pix_inc = pixinc(1, ps);
+		break;
+
+	default:
+		BUG();
+	}
+}
+
+static void calc_dma_rotation_offset(u8 rotation, bool mirror,
+		u16 screen_width,
+		u16 width, u16 height,
+		enum omap_color_mode color_mode, bool fieldmode,
+		unsigned int field_offset,
+		unsigned *offset0, unsigned *offset1,
+		s32 *row_inc, s32 *pix_inc)
+{
+	u8 ps;
+	u16 fbw, fbh;
+
+	/* FIXME CLUT formats */
+	switch (color_mode) {
+	case OMAP_DSS_COLOR_CLUT1:
+	case OMAP_DSS_COLOR_CLUT2:
+	case OMAP_DSS_COLOR_CLUT4:
+	case OMAP_DSS_COLOR_CLUT8:
+		BUG();
+		return;
+	default:
+		ps = color_mode_to_bpp(color_mode) / 8;
+		break;
+	}
+
+	DSSDBG("calc_rot(%d): scrw %d, %dx%d\n", rotation, screen_width,
+			width, height);
+
+	/* width & height are overlay sizes, convert to fb sizes */
+
+	if (rotation == OMAP_DSS_ROT_0 || rotation == OMAP_DSS_ROT_180) {
+		fbw = width;
+		fbh = height;
+	} else {
+		fbw = height;
+		fbh = width;
+	}
+
+	/*
+	 * field 0 = even field = bottom field
+	 * field 1 = odd field = top field
+	 */
+	switch (rotation + mirror * 4) {
+	case OMAP_DSS_ROT_0:
+		*offset1 = 0;
+		if (field_offset)
+			*offset0 = *offset1 + field_offset * screen_width * ps;
+		else
+			*offset0 = *offset1;
+		*row_inc = pixinc(1 + (screen_width - fbw) +
+				(fieldmode ? screen_width : 0),
+				ps);
+		*pix_inc = pixinc(1, ps);
+		break;
+	case OMAP_DSS_ROT_90:
+		*offset1 = screen_width * (fbh - 1) * ps;
+		if (field_offset)
+			*offset0 = *offset1 + field_offset * ps;
+		else
+			*offset0 = *offset1;
+		*row_inc = pixinc(screen_width * (fbh - 1) + 1 +
+				(fieldmode ? 1 : 0), ps);
+		*pix_inc = pixinc(-screen_width, ps);
+		break;
+	case OMAP_DSS_ROT_180:
+		*offset1 = (screen_width * (fbh - 1) + fbw - 1) * ps;
+		if (field_offset)
+			*offset0 = *offset1 - field_offset * screen_width * ps;
+		else
+			*offset0 = *offset1;
+		*row_inc = pixinc(-1 -
+				(screen_width - fbw) -
+				(fieldmode ? screen_width : 0),
+				ps);
+		*pix_inc = pixinc(-1, ps);
+		break;
+	case OMAP_DSS_ROT_270:
+		*offset1 = (fbw - 1) * ps;
+		if (field_offset)
+			*offset0 = *offset1 - field_offset * ps;
+		else
+			*offset0 = *offset1;
+		*row_inc = pixinc(-screen_width * (fbh - 1) - 1 -
+				(fieldmode ? 1 : 0), ps);
+		*pix_inc = pixinc(screen_width, ps);
+		break;
+
+	/* mirroring */
+	case OMAP_DSS_ROT_0 + 4:
+		*offset1 = (fbw - 1) * ps;
+		if (field_offset)
+			*offset0 = *offset1 + field_offset * screen_width * ps;
+		else
+			*offset0 = *offset1;
+		*row_inc = pixinc(screen_width * 2 - 1 +
+				(fieldmode ? screen_width : 0),
+				ps);
+		*pix_inc = pixinc(-1, ps);
+		break;
+
+	case OMAP_DSS_ROT_90 + 4:
+		*offset1 = 0;
+		if (field_offset)
+			*offset0 = *offset1 + field_offset * ps;
+		else
+			*offset0 = *offset1;
+		*row_inc = pixinc(-screen_width * (fbh - 1) + 1 +
+				(fieldmode ? 1 : 0),
+				ps);
+		*pix_inc = pixinc(screen_width, ps);
+		break;
+
+	case OMAP_DSS_ROT_180 + 4:
+		*offset1 = screen_width * (fbh - 1) * ps;
+		if (field_offset)
+			*offset0 = *offset1 - field_offset * screen_width * ps;
+		else
+			*offset0 = *offset1;
+		*row_inc = pixinc(1 - screen_width * 2 -
+				(fieldmode ? screen_width : 0),
+				ps);
+		*pix_inc = pixinc(1, ps);
+		break;
+
+	case OMAP_DSS_ROT_270 + 4:
+		*offset1 = (screen_width * (fbh - 1) + fbw - 1) * ps;
+		if (field_offset)
+			*offset0 = *offset1 - field_offset * ps;
+		else
+			*offset0 = *offset1;
+		*row_inc = pixinc(screen_width * (fbh - 1) - 1 -
+				(fieldmode ? 1 : 0),
+				ps);
+		*pix_inc = pixinc(-screen_width, ps);
+		break;
+
+	default:
+		BUG();
+	}
+}
+
+static unsigned long calc_fclk_five_taps(u16 width, u16 height,
+		u16 out_width, u16 out_height, enum omap_color_mode color_mode)
+{
+	u32 fclk = 0;
+	/* FIXME venc pclk? */
+	u64 tmp, pclk = dispc_pclk_rate();
+
+	if (height > out_height) {
+		/* FIXME get real display PPL */
+		unsigned int ppl = 800;
+
+		tmp = pclk * height * out_width;
+		do_div(tmp, 2 * out_height * ppl);
+		fclk = tmp;
+
+		if (height > 2 * out_height && ppl != out_width) {
+			tmp = pclk * (height - 2 * out_height) * out_width;
+			do_div(tmp, 2 * out_height * (ppl - out_width));
+			fclk = max(fclk, (u32) tmp);
+		}
+	}
+
+	if (width > out_width) {
+		tmp = pclk * width;
+		do_div(tmp, out_width);
+		fclk = max(fclk, (u32) tmp);
+
+		if (color_mode == OMAP_DSS_COLOR_RGB24U)
+			fclk <<= 1;
+	}
+
+	return fclk;
+}
+
+static unsigned long calc_fclk(u16 width, u16 height,
+		u16 out_width, u16 out_height)
+{
+	unsigned int hf, vf;
+
+	/*
+	 * FIXME how to determine the 'A' factor
+	 * for the no downscaling case ?
+	 */
+
+	if (width > 3 * out_width)
+		hf = 4;
+	else if (width > 2 * out_width)
+		hf = 3;
+	else if (width > out_width)
+		hf = 2;
+	else
+		hf = 1;
+
+	if (height > out_height)
+		vf = 2;
+	else
+		vf = 1;
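+
+	/* e.g. a 2x horizontal downscale with no vertical scaling gives
+	 * hf = 2 and vf = 1, i.e. the required fclk is twice the pixel clock.
+	 */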
+
+	/* FIXME venc pclk? */
+	return dispc_pclk_rate() * vf * hf;
+}
+
+void dispc_set_channel_out(enum omap_plane plane, enum omap_channel channel_out)
+{
+	enable_clocks(1);
+	_dispc_set_channel_out(plane, channel_out);
+	enable_clocks(0);
+}
+
+static int _dispc_setup_plane(enum omap_plane plane,
+		u32 paddr, u16 screen_width,
+		u16 pos_x, u16 pos_y,
+		u16 width, u16 height,
+		u16 out_width, u16 out_height,
+		enum omap_color_mode color_mode,
+		bool ilace,
+		enum omap_dss_rotation_type rotation_type,
+		u8 rotation, int mirror,
+		u8 global_alpha)
+{
+	const int maxdownscale = cpu_is_omap34xx() ? 4 : 2;
+	bool five_taps = false;
+	bool fieldmode = false;
+	int cconv = 0;
+	unsigned offset0, offset1;
+	s32 row_inc;
+	s32 pix_inc;
+	u16 frame_height = height;
+	unsigned int field_offset = 0;
+
+	if (paddr == 0)
+		return -EINVAL;
+
+	if (ilace && height == out_height)
+		fieldmode = 1;
+
+	if (ilace) {
+		if (fieldmode)
+			height /= 2;
+		pos_y /= 2;
+		out_height /= 2;
+
+		DSSDBG("adjusting for ilace: height %d, pos_y %d, "
+				"out_height %d\n",
+				height, pos_y, out_height);
+	}
+
+	if (plane == OMAP_DSS_GFX) {
+		if (width != out_width || height != out_height)
+			return -EINVAL;
+
+		switch (color_mode) {
+		case OMAP_DSS_COLOR_ARGB16:
+		case OMAP_DSS_COLOR_ARGB32:
+		case OMAP_DSS_COLOR_RGBA32:
+		case OMAP_DSS_COLOR_RGBX32:
+			if (cpu_is_omap24xx())
+				return -EINVAL;
+			/* fall through */
+		case OMAP_DSS_COLOR_RGB12U:
+		case OMAP_DSS_COLOR_RGB16:
+		case OMAP_DSS_COLOR_RGB24P:
+		case OMAP_DSS_COLOR_RGB24U:
+			break;
+
+		default:
+			return -EINVAL;
+		}
+	} else {
+		/* video plane */
+
+		unsigned long fclk = 0;
+
+		if (out_width < width / maxdownscale ||
+		   out_width > width * 8)
+			return -EINVAL;
+
+		if (out_height < height / maxdownscale ||
+		   out_height > height * 8)
+			return -EINVAL;
+
+		switch (color_mode) {
+		case OMAP_DSS_COLOR_RGBX32:
+		case OMAP_DSS_COLOR_RGB12U:
+			if (cpu_is_omap24xx())
+				return -EINVAL;
+			/* fall through */
+		case OMAP_DSS_COLOR_RGB16:
+		case OMAP_DSS_COLOR_RGB24P:
+		case OMAP_DSS_COLOR_RGB24U:
+			break;
+
+		case OMAP_DSS_COLOR_ARGB16:
+		case OMAP_DSS_COLOR_ARGB32:
+		case OMAP_DSS_COLOR_RGBA32:
+			if (cpu_is_omap24xx())
+				return -EINVAL;
+			if (plane == OMAP_DSS_VIDEO1)
+				return -EINVAL;
+			break;
+
+		case OMAP_DSS_COLOR_YUV2:
+		case OMAP_DSS_COLOR_UYVY:
+			cconv = 1;
+			break;
+
+		default:
+			return -EINVAL;
+		}
+
+		/* Must use 5-tap filter? */
+		five_taps = height > out_height * 2;
+
+		if (!five_taps) {
+			fclk = calc_fclk(width, height,
+					out_width, out_height);
+
+			/* Try 5-tap filter if 3-tap fclk is too high */
+			if (cpu_is_omap34xx() && height > out_height &&
+					fclk > dispc_fclk_rate())
+				five_taps = true;
+		}
+
+		if (width > (2048 >> five_taps)) {
+			DSSERR("failed to set up scaling, fclk too low\n");
+			return -EINVAL;
+		}
+
+		if (five_taps)
+			fclk = calc_fclk_five_taps(width, height,
+					out_width, out_height, color_mode);
+
+		DSSDBG("required fclk rate = %lu Hz\n", fclk);
+		DSSDBG("current fclk rate = %lu Hz\n", dispc_fclk_rate());
+
+		if (fclk > dispc_fclk_rate()) {
+			DSSERR("failed to set up scaling, "
+					"required fclk rate = %lu Hz, "
+					"current fclk rate = %lu Hz\n",
+					fclk, dispc_fclk_rate());
+			return -EINVAL;
+		}
+	}
+
+	if (ilace && !fieldmode) {
+		/*
+		 * when downscaling the bottom field may have to start several
+		 * source lines below the top field. Unfortunately ACCUI
+		 * registers will only hold the fractional part of the offset
+		 * so the integer part must be added to the base address of the
+		 * bottom field.
+		 */
+		if (!height || height == out_height)
+			field_offset = 0;
+		else
+			field_offset = height / out_height / 2;
+	}
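+
+	/* e.g. a 480-line frame scaled to 120 lines: the bottom field starts
+	 * 480 / 120 / 2 = 2 source lines below the top field.
+	 */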
+
+	/* Fields are independent but interleaved in memory. */
+	if (fieldmode)
+		field_offset = 1;
+
+	if (rotation_type == OMAP_DSS_ROT_DMA)
+		calc_dma_rotation_offset(rotation, mirror,
+				screen_width, width, frame_height, color_mode,
+				fieldmode, field_offset,
+				&offset0, &offset1, &row_inc, &pix_inc);
+	else
+		calc_vrfb_rotation_offset(rotation, mirror,
+				screen_width, width, frame_height, color_mode,
+				fieldmode, field_offset,
+				&offset0, &offset1, &row_inc, &pix_inc);
+
+	DSSDBG("offset0 %u, offset1 %u, row_inc %d, pix_inc %d\n",
+			offset0, offset1, row_inc, pix_inc);
+
+	_dispc_set_color_mode(plane, color_mode);
+
+	_dispc_set_plane_ba0(plane, paddr + offset0);
+	_dispc_set_plane_ba1(plane, paddr + offset1);
+
+	_dispc_set_row_inc(plane, row_inc);
+	_dispc_set_pix_inc(plane, pix_inc);
+
+	DSSDBG("%d,%d %dx%d -> %dx%d\n", pos_x, pos_y, width, height,
+			out_width, out_height);
+
+	_dispc_set_plane_pos(plane, pos_x, pos_y);
+
+	_dispc_set_pic_size(plane, width, height);
+
+	if (plane != OMAP_DSS_GFX) {
+		_dispc_set_scaling(plane, width, height,
+				   out_width, out_height,
+				   ilace, five_taps, fieldmode);
+		_dispc_set_vid_size(plane, out_width, out_height);
+		_dispc_set_vid_color_conv(plane, cconv);
+	}
+
+	_dispc_set_rotation_attrs(plane, rotation, mirror, color_mode);
+
+	if (plane != OMAP_DSS_VIDEO1)
+		_dispc_setup_global_alpha(plane, global_alpha);
+
+	return 0;
+}
+
+static void _dispc_enable_plane(enum omap_plane plane, bool enable)
+{
+	REG_FLD_MOD(dispc_reg_att[plane], enable ? 1 : 0, 0, 0);
+}
+
+static void dispc_disable_isr(void *data, u32 mask)
+{
+	struct completion *compl = data;
+	complete(compl);
+}
+
+static void _enable_lcd_out(bool enable)
+{
+	REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 0, 0);
+}
+
+void dispc_enable_lcd_out(bool enable)
+{
+	struct completion frame_done_completion;
+	bool is_on;
+	int r;
+
+	enable_clocks(1);
+
+	/* When we disable the LCD output, we need to wait until the frame
+	 * is done. Otherwise the DSS is still working, and turning off the
+	 * clocks prevents DSS from going to OFF mode */
+	is_on = REG_GET(DISPC_CONTROL, 0, 0);
+
+	if (!enable && is_on) {
+		init_completion(&frame_done_completion);
+
+		r = omap_dispc_register_isr(dispc_disable_isr,
+				&frame_done_completion,
+				DISPC_IRQ_FRAMEDONE);
+
+		if (r)
+			DSSERR("failed to register FRAMEDONE isr\n");
+	}
+
+	_enable_lcd_out(enable);
+
+	if (!enable && is_on) {
+		if (!wait_for_completion_timeout(&frame_done_completion,
+					msecs_to_jiffies(100)))
+			DSSERR("timeout waiting for FRAME DONE\n");
+
+		r = omap_dispc_unregister_isr(dispc_disable_isr,
+				&frame_done_completion,
+				DISPC_IRQ_FRAMEDONE);
+
+		if (r)
+			DSSERR("failed to unregister FRAMEDONE isr\n");
+	}
+
+	enable_clocks(0);
+}
+
+static void _enable_digit_out(bool enable)
+{
+	REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 1, 1);
+}
+
+void dispc_enable_digit_out(bool enable)
+{
+	struct completion frame_done_completion;
+	int r;
+
+	enable_clocks(1);
+
+	if (REG_GET(DISPC_CONTROL, 1, 1) == enable) {
+		enable_clocks(0);
+		return;
+	}
+
+	if (enable) {
+		unsigned long flags;
+		/* When we enable the digit output, we'll get an extra
+		 * SYNC_LOST_DIGIT interrupt that we need to ignore */
+		spin_lock_irqsave(&dispc.irq_lock, flags);
+		dispc.irq_error_mask &= ~DISPC_IRQ_SYNC_LOST_DIGIT;
+		_omap_dispc_set_irqs();
+		spin_unlock_irqrestore(&dispc.irq_lock, flags);
+	}
+
+	/* When we disable the digit output, we need to wait until both
+	 * fields are done. Otherwise the DSS is still working, and turning
+	 * off the clocks prevents DSS from going to OFF mode. And when
+	 * enabling, we need to wait for the extra SYNC_LOST_DIGIT irqs */
+	init_completion(&frame_done_completion);
+
+	r = omap_dispc_register_isr(dispc_disable_isr, &frame_done_completion,
+			DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD);
+	if (r)
+		DSSERR("failed to register EVSYNC isr\n");
+
+	_enable_digit_out(enable);
+
+	/* XXX I understand from TRM that we should only wait for the
+	 * current field to complete. But it seems we have to wait
+	 * for both fields */
+	if (!wait_for_completion_timeout(&frame_done_completion,
+				msecs_to_jiffies(100)))
+		DSSERR("timeout waiting for EVSYNC\n");
+
+	if (!wait_for_completion_timeout(&frame_done_completion,
+				msecs_to_jiffies(100)))
+		DSSERR("timeout waiting for EVSYNC\n");
+
+	r = omap_dispc_unregister_isr(dispc_disable_isr,
+			&frame_done_completion,
+			DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD);
+	if (r)
+		DSSERR("failed to unregister EVSYNC isr\n");
+
+	if (enable) {
+		unsigned long flags;
+		spin_lock_irqsave(&dispc.irq_lock, flags);
+		dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR;
+		dispc_write_reg(DISPC_IRQSTATUS, DISPC_IRQ_SYNC_LOST_DIGIT);
+		_omap_dispc_set_irqs();
+		spin_unlock_irqrestore(&dispc.irq_lock, flags);
+	}
+
+	enable_clocks(0);
+}
+
+void dispc_lcd_enable_signal_polarity(bool act_high)
+{
+	enable_clocks(1);
+	REG_FLD_MOD(DISPC_CONTROL, act_high ? 1 : 0, 29, 29);
+	enable_clocks(0);
+}
+
+void dispc_lcd_enable_signal(bool enable)
+{
+	enable_clocks(1);
+	REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 28, 28);
+	enable_clocks(0);
+}
+
+void dispc_pck_free_enable(bool enable)
+{
+	enable_clocks(1);
+	REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 27, 27);
+	enable_clocks(0);
+}
+
+void dispc_enable_fifohandcheck(bool enable)
+{
+	enable_clocks(1);
+	REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 16, 16);
+	enable_clocks(0);
+}
+
+void dispc_set_lcd_display_type(enum omap_lcd_display_type type)
+{
+	int mode;
+
+	switch (type) {
+	case OMAP_DSS_LCD_DISPLAY_STN:
+		mode = 0;
+		break;
+
+	case OMAP_DSS_LCD_DISPLAY_TFT:
+		mode = 1;
+		break;
+
+	default:
+		BUG();
+		return;
+	}
+
+	enable_clocks(1);
+	REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3);
+	enable_clocks(0);
+}
+
+void dispc_set_loadmode(enum omap_dss_load_mode mode)
+{
+	enable_clocks(1);
+	REG_FLD_MOD(DISPC_CONFIG, mode, 2, 1);
+	enable_clocks(0);
+}
+
+void dispc_set_default_color(enum omap_channel channel, u32 color)
+{
+	const struct dispc_reg def_reg[] = { DISPC_DEFAULT_COLOR0,
+				DISPC_DEFAULT_COLOR1 };
+
+	enable_clocks(1);
+	dispc_write_reg(def_reg[channel], color);
+	enable_clocks(0);
+}
+
+u32 dispc_get_default_color(enum omap_channel channel)
+{
+	const struct dispc_reg def_reg[] = { DISPC_DEFAULT_COLOR0,
+				DISPC_DEFAULT_COLOR1 };
+	u32 l;
+
+	BUG_ON(channel != OMAP_DSS_CHANNEL_DIGIT &&
+	       channel != OMAP_DSS_CHANNEL_LCD);
+
+	enable_clocks(1);
+	l = dispc_read_reg(def_reg[channel]);
+	enable_clocks(0);
+
+	return l;
+}
+
+void dispc_set_trans_key(enum omap_channel ch,
+		enum omap_dss_trans_key_type type,
+		u32 trans_key)
+{
+	const struct dispc_reg tr_reg[] = {
+		DISPC_TRANS_COLOR0, DISPC_TRANS_COLOR1 };
+
+	enable_clocks(1);
+	if (ch == OMAP_DSS_CHANNEL_LCD)
+		REG_FLD_MOD(DISPC_CONFIG, type, 11, 11);
+	else /* OMAP_DSS_CHANNEL_DIGIT */
+		REG_FLD_MOD(DISPC_CONFIG, type, 13, 13);
+
+	dispc_write_reg(tr_reg[ch], trans_key);
+	enable_clocks(0);
+}
+
+void dispc_get_trans_key(enum omap_channel ch,
+		enum omap_dss_trans_key_type *type,
+		u32 *trans_key)
+{
+	const struct dispc_reg tr_reg[] = {
+		DISPC_TRANS_COLOR0, DISPC_TRANS_COLOR1 };
+
+	enable_clocks(1);
+	if (type) {
+		if (ch == OMAP_DSS_CHANNEL_LCD)
+			*type = REG_GET(DISPC_CONFIG, 11, 11);
+		else if (ch == OMAP_DSS_CHANNEL_DIGIT)
+			*type = REG_GET(DISPC_CONFIG, 13, 13);
+		else
+			BUG();
+	}
+
+	if (trans_key)
+		*trans_key = dispc_read_reg(tr_reg[ch]);
+	enable_clocks(0);
+}
+
+void dispc_enable_trans_key(enum omap_channel ch, bool enable)
+{
+	enable_clocks(1);
+	if (ch == OMAP_DSS_CHANNEL_LCD)
+		REG_FLD_MOD(DISPC_CONFIG, enable, 10, 10);
+	else /* OMAP_DSS_CHANNEL_DIGIT */
+		REG_FLD_MOD(DISPC_CONFIG, enable, 12, 12);
+	enable_clocks(0);
+}
+
+void dispc_enable_alpha_blending(enum omap_channel ch, bool enable)
+{
+	if (cpu_is_omap24xx())
+		return;
+
+	enable_clocks(1);
+	if (ch == OMAP_DSS_CHANNEL_LCD)
+		REG_FLD_MOD(DISPC_CONFIG, enable, 18, 18);
+	else /* OMAP_DSS_CHANNEL_DIGIT */
+		REG_FLD_MOD(DISPC_CONFIG, enable, 19, 19);
+	enable_clocks(0);
+}
+
+bool dispc_alpha_blending_enabled(enum omap_channel ch)
+{
+	bool enabled;
+
+	if (cpu_is_omap24xx())
+		return false;
+
+	enable_clocks(1);
+	if (ch == OMAP_DSS_CHANNEL_LCD)
+		enabled = REG_GET(DISPC_CONFIG, 18, 18);
+	else if (ch == OMAP_DSS_CHANNEL_DIGIT)
+		enabled = REG_GET(DISPC_CONFIG, 19, 19);
+	else
+		BUG();
+	enable_clocks(0);
+
+	return enabled;
+}
+
+bool dispc_trans_key_enabled(enum omap_channel ch)
+{
+	bool enabled;
+
+	enable_clocks(1);
+	if (ch == OMAP_DSS_CHANNEL_LCD)
+		enabled = REG_GET(DISPC_CONFIG, 10, 10);
+	else if (ch == OMAP_DSS_CHANNEL_DIGIT)
+		enabled = REG_GET(DISPC_CONFIG, 12, 12);
+	else
+		BUG();
+	enable_clocks(0);
+
+	return enabled;
+}
+
+void dispc_set_tft_data_lines(u8 data_lines)
+{
+	int code;
+
+	switch (data_lines) {
+	case 12:
+		code = 0;
+		break;
+	case 16:
+		code = 1;
+		break;
+	case 18:
+		code = 2;
+		break;
+	case 24:
+		code = 3;
+		break;
+	default:
+		BUG();
+		return;
+	}
+
+	enable_clocks(1);
+	REG_FLD_MOD(DISPC_CONTROL, code, 9, 8);
+	enable_clocks(0);
+}
+
+void dispc_set_parallel_interface_mode(enum omap_parallel_interface_mode mode)
+{
+	u32 l;
+	int stallmode;
+	int gpout0 = 1;
+	int gpout1;
+
+	switch (mode) {
+	case OMAP_DSS_PARALLELMODE_BYPASS:
+		stallmode = 0;
+		gpout1 = 1;
+		break;
+
+	case OMAP_DSS_PARALLELMODE_RFBI:
+		stallmode = 1;
+		gpout1 = 0;
+		break;
+
+	case OMAP_DSS_PARALLELMODE_DSI:
+		stallmode = 1;
+		gpout1 = 1;
+		break;
+
+	default:
+		BUG();
+		return;
+	}
+
+	enable_clocks(1);
+
+	l = dispc_read_reg(DISPC_CONTROL);
+
+	l = FLD_MOD(l, stallmode, 11, 11);
+	l = FLD_MOD(l, gpout0, 15, 15);
+	l = FLD_MOD(l, gpout1, 16, 16);
+
+	dispc_write_reg(DISPC_CONTROL, l);
+
+	enable_clocks(0);
+}
+
+static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
+		int vsw, int vfp, int vbp)
+{
+	if (cpu_is_omap24xx() || omap_rev() < OMAP3430_REV_ES3_0) {
+		if (hsw < 1 || hsw > 64 ||
+				hfp < 1 || hfp > 256 ||
+				hbp < 1 || hbp > 256 ||
+				vsw < 1 || vsw > 64 ||
+				vfp < 0 || vfp > 255 ||
+				vbp < 0 || vbp > 255)
+			return false;
+	} else {
+		if (hsw < 1 || hsw > 256 ||
+				hfp < 1 || hfp > 4096 ||
+				hbp < 1 || hbp > 4096 ||
+				vsw < 1 || vsw > 256 ||
+				vfp < 0 || vfp > 4095 ||
+				vbp < 0 || vbp > 4095)
+			return false;
+	}
+
+	return true;
+}
+
+bool dispc_lcd_timings_ok(struct omap_video_timings *timings)
+{
+	return _dispc_lcd_timings_ok(timings->hsw, timings->hfp,
+			timings->hbp, timings->vsw,
+			timings->vfp, timings->vbp);
+}
+
+static void _dispc_set_lcd_timings(int hsw, int hfp, int hbp,
+				   int vsw, int vfp, int vbp)
+{
+	u32 timing_h, timing_v;
+
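+	/* OMAP2 and OMAP3 before ES3.0 have narrower timing fields, hence
+	 * the two register layouts (see _dispc_lcd_timings_ok()) */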
+	if (cpu_is_omap24xx() || omap_rev() < OMAP3430_REV_ES3_0) {
+		timing_h = FLD_VAL(hsw-1, 5, 0) | FLD_VAL(hfp-1, 15, 8) |
+			FLD_VAL(hbp-1, 27, 20);
+
+		timing_v = FLD_VAL(vsw-1, 5, 0) | FLD_VAL(vfp, 15, 8) |
+			FLD_VAL(vbp, 27, 20);
+	} else {
+		timing_h = FLD_VAL(hsw-1, 7, 0) | FLD_VAL(hfp-1, 19, 8) |
+			FLD_VAL(hbp-1, 31, 20);
+
+		timing_v = FLD_VAL(vsw-1, 7, 0) | FLD_VAL(vfp, 19, 8) |
+			FLD_VAL(vbp, 31, 20);
+	}
+
+	enable_clocks(1);
+	dispc_write_reg(DISPC_TIMING_H, timing_h);
+	dispc_write_reg(DISPC_TIMING_V, timing_v);
+	enable_clocks(0);
+}
+
+/* change name to mode? */
+void dispc_set_lcd_timings(struct omap_video_timings *timings)
+{
+	unsigned xtot, ytot;
+	unsigned long ht, vt;
+
+	if (!_dispc_lcd_timings_ok(timings->hsw, timings->hfp,
+				timings->hbp, timings->vsw,
+				timings->vfp, timings->vbp))
+		BUG();
+
+	_dispc_set_lcd_timings(timings->hsw, timings->hfp, timings->hbp,
+			timings->vsw, timings->vfp, timings->vbp);
+
+	dispc_set_lcd_size(timings->x_res, timings->y_res);
+
+	xtot = timings->x_res + timings->hfp + timings->hsw + timings->hbp;
+	ytot = timings->y_res + timings->vfp + timings->vsw + timings->vbp;
+
+	ht = (timings->pixel_clock * 1000) / xtot;
+	vt = (timings->pixel_clock * 1000) / xtot / ytot;
+
+	DSSDBG("xres %u yres %u\n", timings->x_res, timings->y_res);
+	DSSDBG("pck %u\n", timings->pixel_clock);
+	DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
+			timings->hsw, timings->hfp, timings->hbp,
+			timings->vsw, timings->vfp, timings->vbp);
+
+	DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
+}
+
+static void dispc_set_lcd_divisor(u16 lck_div, u16 pck_div)
+{
+	BUG_ON(lck_div < 1);
+	BUG_ON(pck_div < 2);
+
+	enable_clocks(1);
+	dispc_write_reg(DISPC_DIVISOR,
+			FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0));
+	enable_clocks(0);
+}
+
+static void dispc_get_lcd_divisor(int *lck_div, int *pck_div)
+{
+	u32 l;
+	l = dispc_read_reg(DISPC_DIVISOR);
+	*lck_div = FLD_GET(l, 23, 16);
+	*pck_div = FLD_GET(l, 7, 0);
+}
+
+unsigned long dispc_fclk_rate(void)
+{
+	unsigned long r = 0;
+
+	if (dss_get_dispc_clk_source() == 0)
+		r = dss_clk_get_rate(DSS_CLK_FCK1);
+	else
+#ifdef CONFIG_OMAP2_DSS_DSI
+		r = dsi_get_dsi1_pll_rate();
+#else
+		BUG();
+#endif
+	return r;
+}
+
+unsigned long dispc_lclk_rate(void)
+{
+	int lcd;
+	unsigned long r;
+	u32 l;
+
+	l = dispc_read_reg(DISPC_DIVISOR);
+
+	lcd = FLD_GET(l, 23, 16);
+
+	r = dispc_fclk_rate();
+
+	return r / lcd;
+}
+
+unsigned long dispc_pclk_rate(void)
+{
+	int lcd, pcd;
+	unsigned long r;
+	u32 l;
+
+	l = dispc_read_reg(DISPC_DIVISOR);
+
+	lcd = FLD_GET(l, 23, 16);
+	pcd = FLD_GET(l, 7, 0);
+
+	r = dispc_fclk_rate();
+
+	return r / lcd / pcd;
+}
+
+void dispc_dump_clocks(struct seq_file *s)
+{
+	int lcd, pcd;
+
+	enable_clocks(1);
+
+	dispc_get_lcd_divisor(&lcd, &pcd);
+
+	seq_printf(s, "- DISPC -\n");
+
+	seq_printf(s, "dispc fclk source = %s\n",
+			dss_get_dispc_clk_source() == 0 ?
+			"dss1_alwon_fclk" : "dsi1_pll_fclk");
+
+	seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate());
+	seq_printf(s, "lck\t\t%-16lulck div\t%u\n", dispc_lclk_rate(), lcd);
+	seq_printf(s, "pck\t\t%-16lupck div\t%u\n", dispc_pclk_rate(), pcd);
+
+	enable_clocks(0);
+}
+
+void dispc_dump_regs(struct seq_file *s)
+{
+#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dispc_read_reg(r))
+
+	dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+	DUMPREG(DISPC_REVISION);
+	DUMPREG(DISPC_SYSCONFIG);
+	DUMPREG(DISPC_SYSSTATUS);
+	DUMPREG(DISPC_IRQSTATUS);
+	DUMPREG(DISPC_IRQENABLE);
+	DUMPREG(DISPC_CONTROL);
+	DUMPREG(DISPC_CONFIG);
+	DUMPREG(DISPC_CAPABLE);
+	DUMPREG(DISPC_DEFAULT_COLOR0);
+	DUMPREG(DISPC_DEFAULT_COLOR1);
+	DUMPREG(DISPC_TRANS_COLOR0);
+	DUMPREG(DISPC_TRANS_COLOR1);
+	DUMPREG(DISPC_LINE_STATUS);
+	DUMPREG(DISPC_LINE_NUMBER);
+	DUMPREG(DISPC_TIMING_H);
+	DUMPREG(DISPC_TIMING_V);
+	DUMPREG(DISPC_POL_FREQ);
+	DUMPREG(DISPC_DIVISOR);
+	DUMPREG(DISPC_GLOBAL_ALPHA);
+	DUMPREG(DISPC_SIZE_DIG);
+	DUMPREG(DISPC_SIZE_LCD);
+
+	DUMPREG(DISPC_GFX_BA0);
+	DUMPREG(DISPC_GFX_BA1);
+	DUMPREG(DISPC_GFX_POSITION);
+	DUMPREG(DISPC_GFX_SIZE);
+	DUMPREG(DISPC_GFX_ATTRIBUTES);
+	DUMPREG(DISPC_GFX_FIFO_THRESHOLD);
+	DUMPREG(DISPC_GFX_FIFO_SIZE_STATUS);
+	DUMPREG(DISPC_GFX_ROW_INC);
+	DUMPREG(DISPC_GFX_PIXEL_INC);
+	DUMPREG(DISPC_GFX_WINDOW_SKIP);
+	DUMPREG(DISPC_GFX_TABLE_BA);
+
+	DUMPREG(DISPC_DATA_CYCLE1);
+	DUMPREG(DISPC_DATA_CYCLE2);
+	DUMPREG(DISPC_DATA_CYCLE3);
+
+	DUMPREG(DISPC_CPR_COEF_R);
+	DUMPREG(DISPC_CPR_COEF_G);
+	DUMPREG(DISPC_CPR_COEF_B);
+
+	DUMPREG(DISPC_GFX_PRELOAD);
+
+	DUMPREG(DISPC_VID_BA0(0));
+	DUMPREG(DISPC_VID_BA1(0));
+	DUMPREG(DISPC_VID_POSITION(0));
+	DUMPREG(DISPC_VID_SIZE(0));
+	DUMPREG(DISPC_VID_ATTRIBUTES(0));
+	DUMPREG(DISPC_VID_FIFO_THRESHOLD(0));
+	DUMPREG(DISPC_VID_FIFO_SIZE_STATUS(0));
+	DUMPREG(DISPC_VID_ROW_INC(0));
+	DUMPREG(DISPC_VID_PIXEL_INC(0));
+	DUMPREG(DISPC_VID_FIR(0));
+	DUMPREG(DISPC_VID_PICTURE_SIZE(0));
+	DUMPREG(DISPC_VID_ACCU0(0));
+	DUMPREG(DISPC_VID_ACCU1(0));
+
+	DUMPREG(DISPC_VID_BA0(1));
+	DUMPREG(DISPC_VID_BA1(1));
+	DUMPREG(DISPC_VID_POSITION(1));
+	DUMPREG(DISPC_VID_SIZE(1));
+	DUMPREG(DISPC_VID_ATTRIBUTES(1));
+	DUMPREG(DISPC_VID_FIFO_THRESHOLD(1));
+	DUMPREG(DISPC_VID_FIFO_SIZE_STATUS(1));
+	DUMPREG(DISPC_VID_ROW_INC(1));
+	DUMPREG(DISPC_VID_PIXEL_INC(1));
+	DUMPREG(DISPC_VID_FIR(1));
+	DUMPREG(DISPC_VID_PICTURE_SIZE(1));
+	DUMPREG(DISPC_VID_ACCU0(1));
+	DUMPREG(DISPC_VID_ACCU1(1));
+
+	DUMPREG(DISPC_VID_FIR_COEF_H(0, 0));
+	DUMPREG(DISPC_VID_FIR_COEF_H(0, 1));
+	DUMPREG(DISPC_VID_FIR_COEF_H(0, 2));
+	DUMPREG(DISPC_VID_FIR_COEF_H(0, 3));
+	DUMPREG(DISPC_VID_FIR_COEF_H(0, 4));
+	DUMPREG(DISPC_VID_FIR_COEF_H(0, 5));
+	DUMPREG(DISPC_VID_FIR_COEF_H(0, 6));
+	DUMPREG(DISPC_VID_FIR_COEF_H(0, 7));
+	DUMPREG(DISPC_VID_FIR_COEF_HV(0, 0));
+	DUMPREG(DISPC_VID_FIR_COEF_HV(0, 1));
+	DUMPREG(DISPC_VID_FIR_COEF_HV(0, 2));
+	DUMPREG(DISPC_VID_FIR_COEF_HV(0, 3));
+	DUMPREG(DISPC_VID_FIR_COEF_HV(0, 4));
+	DUMPREG(DISPC_VID_FIR_COEF_HV(0, 5));
+	DUMPREG(DISPC_VID_FIR_COEF_HV(0, 6));
+	DUMPREG(DISPC_VID_FIR_COEF_HV(0, 7));
+	DUMPREG(DISPC_VID_CONV_COEF(0, 0));
+	DUMPREG(DISPC_VID_CONV_COEF(0, 1));
+	DUMPREG(DISPC_VID_CONV_COEF(0, 2));
+	DUMPREG(DISPC_VID_CONV_COEF(0, 3));
+	DUMPREG(DISPC_VID_CONV_COEF(0, 4));
+	DUMPREG(DISPC_VID_FIR_COEF_V(0, 0));
+	DUMPREG(DISPC_VID_FIR_COEF_V(0, 1));
+	DUMPREG(DISPC_VID_FIR_COEF_V(0, 2));
+	DUMPREG(DISPC_VID_FIR_COEF_V(0, 3));
+	DUMPREG(DISPC_VID_FIR_COEF_V(0, 4));
+	DUMPREG(DISPC_VID_FIR_COEF_V(0, 5));
+	DUMPREG(DISPC_VID_FIR_COEF_V(0, 6));
+	DUMPREG(DISPC_VID_FIR_COEF_V(0, 7));
+
+	DUMPREG(DISPC_VID_FIR_COEF_H(1, 0));
+	DUMPREG(DISPC_VID_FIR_COEF_H(1, 1));
+	DUMPREG(DISPC_VID_FIR_COEF_H(1, 2));
+	DUMPREG(DISPC_VID_FIR_COEF_H(1, 3));
+	DUMPREG(DISPC_VID_FIR_COEF_H(1, 4));
+	DUMPREG(DISPC_VID_FIR_COEF_H(1, 5));
+	DUMPREG(DISPC_VID_FIR_COEF_H(1, 6));
+	DUMPREG(DISPC_VID_FIR_COEF_H(1, 7));
+	DUMPREG(DISPC_VID_FIR_COEF_HV(1, 0));
+	DUMPREG(DISPC_VID_FIR_COEF_HV(1, 1));
+	DUMPREG(DISPC_VID_FIR_COEF_HV(1, 2));
+	DUMPREG(DISPC_VID_FIR_COEF_HV(1, 3));
+	DUMPREG(DISPC_VID_FIR_COEF_HV(1, 4));
+	DUMPREG(DISPC_VID_FIR_COEF_HV(1, 5));
+	DUMPREG(DISPC_VID_FIR_COEF_HV(1, 6));
+	DUMPREG(DISPC_VID_FIR_COEF_HV(1, 7));
+	DUMPREG(DISPC_VID_CONV_COEF(1, 0));
+	DUMPREG(DISPC_VID_CONV_COEF(1, 1));
+	DUMPREG(DISPC_VID_CONV_COEF(1, 2));
+	DUMPREG(DISPC_VID_CONV_COEF(1, 3));
+	DUMPREG(DISPC_VID_CONV_COEF(1, 4));
+	DUMPREG(DISPC_VID_FIR_COEF_V(1, 0));
+	DUMPREG(DISPC_VID_FIR_COEF_V(1, 1));
+	DUMPREG(DISPC_VID_FIR_COEF_V(1, 2));
+	DUMPREG(DISPC_VID_FIR_COEF_V(1, 3));
+	DUMPREG(DISPC_VID_FIR_COEF_V(1, 4));
+	DUMPREG(DISPC_VID_FIR_COEF_V(1, 5));
+	DUMPREG(DISPC_VID_FIR_COEF_V(1, 6));
+	DUMPREG(DISPC_VID_FIR_COEF_V(1, 7));
+
+	DUMPREG(DISPC_VID_PRELOAD(0));
+	DUMPREG(DISPC_VID_PRELOAD(1));
+
+	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+#undef DUMPREG
+}
+
+static void _dispc_set_pol_freq(bool onoff, bool rf, bool ieo, bool ipc,
+				bool ihs, bool ivs, u8 acbi, u8 acb)
+{
+	u32 l = 0;
+
+	DSSDBG("onoff %d rf %d ieo %d ipc %d ihs %d ivs %d acbi %d acb %d\n",
+			onoff, rf, ieo, ipc, ihs, ivs, acbi, acb);
+
+	l |= FLD_VAL(onoff, 17, 17);
+	l |= FLD_VAL(rf, 16, 16);
+	l |= FLD_VAL(ieo, 15, 15);
+	l |= FLD_VAL(ipc, 14, 14);
+	l |= FLD_VAL(ihs, 13, 13);
+	l |= FLD_VAL(ivs, 12, 12);
+	l |= FLD_VAL(acbi, 11, 8);
+	l |= FLD_VAL(acb, 7, 0);
+
+	enable_clocks(1);
+	dispc_write_reg(DISPC_POL_FREQ, l);
+	enable_clocks(0);
+}
+
+void dispc_set_pol_freq(enum omap_panel_config config, u8 acbi, u8 acb)
+{
+	_dispc_set_pol_freq((config & OMAP_DSS_LCD_ONOFF) != 0,
+			(config & OMAP_DSS_LCD_RF) != 0,
+			(config & OMAP_DSS_LCD_IEO) != 0,
+			(config & OMAP_DSS_LCD_IPC) != 0,
+			(config & OMAP_DSS_LCD_IHS) != 0,
+			(config & OMAP_DSS_LCD_IVS) != 0,
+			acbi, acb);
+}
+
+/* with fck as input clock rate, find dispc dividers that produce req_pck */
+void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck,
+		struct dispc_clock_info *cinfo)
+{
+	u16 pcd_min = is_tft ? 2 : 3;
+	unsigned long best_pck;
+	u16 best_ld, cur_ld;
+	u16 best_pd, cur_pd;
+
+	best_pck = 0;
+	best_ld = 0;
+	best_pd = 0;
+
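+	/* Brute-force search over both 8-bit divider fields, keeping the
+	 * combination whose resulting pck is closest to req_pck, and
+	 * stopping early on an exact match */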
+	for (cur_ld = 1; cur_ld <= 255; ++cur_ld) {
+		unsigned long lck = fck / cur_ld;
+
+		for (cur_pd = pcd_min; cur_pd <= 255; ++cur_pd) {
+			unsigned long pck = lck / cur_pd;
+			long old_delta = abs(best_pck - req_pck);
+			long new_delta = abs(pck - req_pck);
+
+			if (best_pck == 0 || new_delta < old_delta) {
+				best_pck = pck;
+				best_ld = cur_ld;
+				best_pd = cur_pd;
+
+				if (pck == req_pck)
+					goto found;
+			}
+
+			if (pck < req_pck)
+				break;
+		}
+
+		if (lck / pcd_min < req_pck)
+			break;
+	}
+
+found:
+	cinfo->lck_div = best_ld;
+	cinfo->pck_div = best_pd;
+	cinfo->lck = fck / cinfo->lck_div;
+	cinfo->pck = cinfo->lck / cinfo->pck_div;
+}
+
+/* calculate clock rates using dividers in cinfo */
+int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
+		struct dispc_clock_info *cinfo)
+{
+	if (cinfo->lck_div > 255 || cinfo->lck_div == 0)
+		return -EINVAL;
+	if (cinfo->pck_div < 2 || cinfo->pck_div > 255)
+		return -EINVAL;
+
+	cinfo->lck = dispc_fclk_rate / cinfo->lck_div;
+	cinfo->pck = cinfo->lck / cinfo->pck_div;
+
+	return 0;
+}
+
+int dispc_set_clock_div(struct dispc_clock_info *cinfo)
+{
+	DSSDBG("lck = %lu (%u)\n", cinfo->lck, cinfo->lck_div);
+	DSSDBG("pck = %lu (%u)\n", cinfo->pck, cinfo->pck_div);
+
+	dispc_set_lcd_divisor(cinfo->lck_div, cinfo->pck_div);
+
+	return 0;
+}
+
+int dispc_get_clock_div(struct dispc_clock_info *cinfo)
+{
+	unsigned long fck;
+
+	fck = dispc_fclk_rate();
+
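+	/* read back the dividers currently programmed in DISPC_DIVISOR
+	 * and derive the resulting logic and pixel clock rates */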
+	cinfo->lck_div = REG_GET(DISPC_DIVISOR, 23, 16);
+	cinfo->pck_div = REG_GET(DISPC_DIVISOR, 7, 0);
+
+	cinfo->lck = fck / cinfo->lck_div;
+	cinfo->pck = cinfo->lck / cinfo->pck_div;
+
+	return 0;
+}
+
+/* dispc.irq_lock has to be locked by the caller */
+static void _omap_dispc_set_irqs(void)
+{
+	u32 mask;
+	u32 old_mask;
+	int i;
+	struct omap_dispc_isr_data *isr_data;
+
+	mask = dispc.irq_error_mask;
+
+	for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
+		isr_data = &dispc.registered_isr[i];
+
+		if (isr_data->isr == NULL)
+			continue;
+
+		mask |= isr_data->mask;
+	}
+
+	enable_clocks(1);
+
+	old_mask = dispc_read_reg(DISPC_IRQENABLE);
+	/* clear the irqstatus for newly enabled irqs */
+	dispc_write_reg(DISPC_IRQSTATUS, (mask ^ old_mask) & mask);
+
+	dispc_write_reg(DISPC_IRQENABLE, mask);
+
+	enable_clocks(0);
+}
+
+int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
+{
+	int i;
+	int ret;
+	unsigned long flags;
+	struct omap_dispc_isr_data *isr_data;
+
+	if (isr == NULL)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dispc.irq_lock, flags);
+
+	/* check for duplicate entry */
+	for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
+		isr_data = &dispc.registered_isr[i];
+		if (isr_data->isr == isr && isr_data->arg == arg &&
+				isr_data->mask == mask) {
+			ret = -EINVAL;
+			goto err;
+		}
+	}
+
+	isr_data = NULL;
+	ret = -EBUSY;
+
+	for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
+		isr_data = &dispc.registered_isr[i];
+
+		if (isr_data->isr != NULL)
+			continue;
+
+		isr_data->isr = isr;
+		isr_data->arg = arg;
+		isr_data->mask = mask;
+		ret = 0;
+
+		break;
+	}
+
+	if (ret)
+		goto err;
+
+	_omap_dispc_set_irqs();
+
+	spin_unlock_irqrestore(&dispc.irq_lock, flags);
+
+	return 0;
+err:
+	spin_unlock_irqrestore(&dispc.irq_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(omap_dispc_register_isr);
+
+int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
+{
+	int i;
+	unsigned long flags;
+	int ret = -EINVAL;
+	struct omap_dispc_isr_data *isr_data;
+
+	spin_lock_irqsave(&dispc.irq_lock, flags);
+
+	for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
+		isr_data = &dispc.registered_isr[i];
+		if (isr_data->isr != isr || isr_data->arg != arg ||
+				isr_data->mask != mask)
+			continue;
+
+		/* found the correct isr */
+
+		isr_data->isr = NULL;
+		isr_data->arg = NULL;
+		isr_data->mask = 0;
+
+		ret = 0;
+		break;
+	}
+
+	if (ret == 0)
+		_omap_dispc_set_irqs();
+
+	spin_unlock_irqrestore(&dispc.irq_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(omap_dispc_unregister_isr);
+
+#ifdef DEBUG
+static void print_irq_status(u32 status)
+{
+	if ((status & dispc.irq_error_mask) == 0)
+		return;
+
+	printk(KERN_DEBUG "DISPC IRQ: 0x%x: ", status);
+
+#define PIS(x) \
+	if (status & DISPC_IRQ_##x) \
+		printk(#x " ");
+	PIS(GFX_FIFO_UNDERFLOW);
+	PIS(OCP_ERR);
+	PIS(VID1_FIFO_UNDERFLOW);
+	PIS(VID2_FIFO_UNDERFLOW);
+	PIS(SYNC_LOST);
+	PIS(SYNC_LOST_DIGIT);
+#undef PIS
+
+	printk("\n");
+}
+#endif
+
+/* Called from dss.c. Note that we don't touch clocks here,
+ * but we presume they are on because we got an IRQ. However,
+ * an irq handler may turn the clocks off, so the clocks may already
+ * be off later in the function. */
+void dispc_irq_handler(void)
+{
+	int i;
+	u32 irqstatus;
+	u32 handledirqs = 0;
+	u32 unhandled_errors;
+	struct omap_dispc_isr_data *isr_data;
+	struct omap_dispc_isr_data registered_isr[DISPC_MAX_NR_ISRS];
+
+	spin_lock(&dispc.irq_lock);
+
+	irqstatus = dispc_read_reg(DISPC_IRQSTATUS);
+
+#ifdef DEBUG
+	if (dss_debug)
+		print_irq_status(irqstatus);
+#endif
+	/* Ack the interrupt. Do it here before clocks are possibly turned
+	 * off */
+	dispc_write_reg(DISPC_IRQSTATUS, irqstatus);
+	/* flush posted write */
+	dispc_read_reg(DISPC_IRQSTATUS);
+
+	/* make a copy and unlock, so that isrs can unregister
+	 * themselves */
+	memcpy(registered_isr, dispc.registered_isr,
+			sizeof(registered_isr));
+
+	spin_unlock(&dispc.irq_lock);
+
+	for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
+		isr_data = &registered_isr[i];
+
+		if (!isr_data->isr)
+			continue;
+
+		if (isr_data->mask & irqstatus) {
+			isr_data->isr(isr_data->arg, irqstatus);
+			handledirqs |= isr_data->mask;
+		}
+	}
+
+	spin_lock(&dispc.irq_lock);
+
+	unhandled_errors = irqstatus & ~handledirqs & dispc.irq_error_mask;
+
+	if (unhandled_errors) {
+		dispc.error_irqs |= unhandled_errors;
+
+		dispc.irq_error_mask &= ~unhandled_errors;
+		_omap_dispc_set_irqs();
+
+		schedule_work(&dispc.error_work);
+	}
+
+	spin_unlock(&dispc.irq_lock);
+}
+
+static void dispc_error_worker(struct work_struct *work)
+{
+	int i;
+	u32 errors;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dispc.irq_lock, flags);
+	errors = dispc.error_irqs;
+	dispc.error_irqs = 0;
+	spin_unlock_irqrestore(&dispc.irq_lock, flags);
+
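+	/* For each underflow, disable the offending plane, apply the
+	 * change with a GO, and give the hardware time to settle before
+	 * the error irqs are re-enabled at the end of this function */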
+	if (errors & DISPC_IRQ_GFX_FIFO_UNDERFLOW) {
+		DSSERR("GFX_FIFO_UNDERFLOW, disabling GFX\n");
+		for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+			struct omap_overlay *ovl;
+			ovl = omap_dss_get_overlay(i);
+
+			if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
+				continue;
+
+			if (ovl->id == 0) {
+				dispc_enable_plane(ovl->id, 0);
+				dispc_go(ovl->manager->id);
+				mdelay(50);
+				break;
+			}
+		}
+	}
+
+	if (errors & DISPC_IRQ_VID1_FIFO_UNDERFLOW) {
+		DSSERR("VID1_FIFO_UNDERFLOW, disabling VID1\n");
+		for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+			struct omap_overlay *ovl;
+			ovl = omap_dss_get_overlay(i);
+
+			if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
+				continue;
+
+			if (ovl->id == 1) {
+				dispc_enable_plane(ovl->id, 0);
+				dispc_go(ovl->manager->id);
+				mdelay(50);
+				break;
+			}
+		}
+	}
+
+	if (errors & DISPC_IRQ_VID2_FIFO_UNDERFLOW) {
+		DSSERR("VID2_FIFO_UNDERFLOW, disabling VID2\n");
+		for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+			struct omap_overlay *ovl;
+			ovl = omap_dss_get_overlay(i);
+
+			if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
+				continue;
+
+			if (ovl->id == 2) {
+				dispc_enable_plane(ovl->id, 0);
+				dispc_go(ovl->manager->id);
+				mdelay(50);
+				break;
+			}
+		}
+	}
+
+	if (errors & DISPC_IRQ_SYNC_LOST) {
+		struct omap_overlay_manager *manager = NULL;
+		bool enable = false;
+
+		DSSERR("SYNC_LOST, disabling LCD\n");
+
+		for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
+			struct omap_overlay_manager *mgr;
+			mgr = omap_dss_get_overlay_manager(i);
+
+			if (mgr->id == OMAP_DSS_CHANNEL_LCD) {
+				manager = mgr;
+				enable = mgr->device->state ==
+						OMAP_DSS_DISPLAY_ACTIVE;
+				mgr->device->disable(mgr->device);
+				break;
+			}
+		}
+
+		if (manager) {
+			for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+				struct omap_overlay *ovl;
+				ovl = omap_dss_get_overlay(i);
+
+				if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
+					continue;
+
+				if (ovl->id != 0 && ovl->manager == manager)
+					dispc_enable_plane(ovl->id, 0);
+			}
+
+			dispc_go(manager->id);
+			mdelay(50);
+			if (enable)
+				manager->device->enable(manager->device);
+		}
+	}
+
+	if (errors & DISPC_IRQ_SYNC_LOST_DIGIT) {
+		struct omap_overlay_manager *manager = NULL;
+		bool enable = false;
+
+		DSSERR("SYNC_LOST_DIGIT, disabling TV\n");
+
+		for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
+			struct omap_overlay_manager *mgr;
+			mgr = omap_dss_get_overlay_manager(i);
+
+			if (mgr->id == OMAP_DSS_CHANNEL_DIGIT) {
+				manager = mgr;
+				enable = mgr->device->state ==
+						OMAP_DSS_DISPLAY_ACTIVE;
+				mgr->device->disable(mgr->device);
+				break;
+			}
+		}
+
+		if (manager) {
+			for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+				struct omap_overlay *ovl;
+				ovl = omap_dss_get_overlay(i);
+
+				if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
+					continue;
+
+				if (ovl->id != 0 && ovl->manager == manager)
+					dispc_enable_plane(ovl->id, 0);
+			}
+
+			dispc_go(manager->id);
+			mdelay(50);
+			if (enable)
+				manager->device->enable(manager->device);
+		}
+	}
+
+	if (errors & DISPC_IRQ_OCP_ERR) {
+		DSSERR("OCP_ERR\n");
+		for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
+			struct omap_overlay_manager *mgr;
+			mgr = omap_dss_get_overlay_manager(i);
+
+			if (mgr->caps & OMAP_DSS_OVL_CAP_DISPC)
+				mgr->device->disable(mgr->device);
+		}
+	}
+
+	spin_lock_irqsave(&dispc.irq_lock, flags);
+	dispc.irq_error_mask |= errors;
+	_omap_dispc_set_irqs();
+	spin_unlock_irqrestore(&dispc.irq_lock, flags);
+}
+
+int omap_dispc_wait_for_irq_timeout(u32 irqmask, unsigned long timeout)
+{
+	void dispc_irq_wait_handler(void *data, u32 mask)
+	{
+		complete((struct completion *)data);
+	}
+
+	int r;
+	DECLARE_COMPLETION_ONSTACK(completion);
+
+	r = omap_dispc_register_isr(dispc_irq_wait_handler, &completion,
+			irqmask);
+
+	if (r)
+		return r;
+
+	timeout = wait_for_completion_timeout(&completion, timeout);
+
+	omap_dispc_unregister_isr(dispc_irq_wait_handler, &completion, irqmask);
+
+	if (timeout == 0)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
+		unsigned long timeout)
+{
+	void dispc_irq_wait_handler(void *data, u32 mask)
+	{
+		complete((struct completion *)data);
+	}
+
+	int r;
+	DECLARE_COMPLETION_ONSTACK(completion);
+
+	r = omap_dispc_register_isr(dispc_irq_wait_handler, &completion,
+			irqmask);
+
+	if (r)
+		return r;
+
+	timeout = wait_for_completion_interruptible_timeout(&completion,
+			timeout);
+
+	omap_dispc_unregister_isr(dispc_irq_wait_handler, &completion, irqmask);
+
+	if (timeout == 0)
+		return -ETIMEDOUT;
+
+	if (timeout == -ERESTARTSYS)
+		return -ERESTARTSYS;
+
+	return 0;
+}
+
+#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
+void dispc_fake_vsync_irq(void)
+{
+	u32 irqstatus = DISPC_IRQ_VSYNC;
+	int i;
+
+	local_irq_disable();
+
+	for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
+		struct omap_dispc_isr_data *isr_data;
+		isr_data = &dispc.registered_isr[i];
+
+		if (!isr_data->isr)
+			continue;
+
+		if (isr_data->mask & irqstatus)
+			isr_data->isr(isr_data->arg, irqstatus);
+	}
+
+	local_irq_enable();
+}
+#endif
+
+static void _omap_dispc_initialize_irq(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dispc.irq_lock, flags);
+
+	memset(dispc.registered_isr, 0, sizeof(dispc.registered_isr));
+
+	dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR;
+
+	/* there's SYNC_LOST_DIGIT waiting after enabling the DSS,
+	 * so clear it */
+	dispc_write_reg(DISPC_IRQSTATUS, dispc_read_reg(DISPC_IRQSTATUS));
+
+	_omap_dispc_set_irqs();
+
+	spin_unlock_irqrestore(&dispc.irq_lock, flags);
+}
+
+void dispc_enable_sidle(void)
+{
+	REG_FLD_MOD(DISPC_SYSCONFIG, 2, 4, 3);	/* SIDLEMODE: smart idle */
+}
+
+void dispc_disable_sidle(void)
+{
+	REG_FLD_MOD(DISPC_SYSCONFIG, 1, 4, 3);	/* SIDLEMODE: no idle */
+}
+
+static void _omap_dispc_initial_config(void)
+{
+	u32 l;
+
+	l = dispc_read_reg(DISPC_SYSCONFIG);
+	l = FLD_MOD(l, 2, 13, 12);	/* MIDLEMODE: smart standby */
+	l = FLD_MOD(l, 2, 4, 3);	/* SIDLEMODE: smart idle */
+	l = FLD_MOD(l, 1, 2, 2);	/* ENWAKEUP */
+	l = FLD_MOD(l, 1, 0, 0);	/* AUTOIDLE */
+	dispc_write_reg(DISPC_SYSCONFIG, l);
+
+	/* FUNCGATED */
+	REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
+
+	/* L3 firewall setting: enable access to OCM RAM */
+	/* XXX this should be somewhere in plat-omap */
+	if (cpu_is_omap24xx())
+		__raw_writel(0x402000b0, OMAP2_L3_IO_ADDRESS(0x680050a0));
+
+	_dispc_setup_color_conv_coef();
+
+	dispc_set_loadmode(OMAP_DSS_LOAD_FRAME_ONLY);
+
+	dispc_read_plane_fifo_sizes();
+}
+
+int dispc_init(void)
+{
+	u32 rev;
+
+	spin_lock_init(&dispc.irq_lock);
+
+	INIT_WORK(&dispc.error_work, dispc_error_worker);
+
+	dispc.base = ioremap(DISPC_BASE, DISPC_SZ_REGS);
+	if (!dispc.base) {
+		DSSERR("can't ioremap DISPC\n");
+		return -ENOMEM;
+	}
+
+	enable_clocks(1);
+
+	_omap_dispc_initial_config();
+
+	_omap_dispc_initialize_irq();
+
+	dispc_save_context();
+
+	rev = dispc_read_reg(DISPC_REVISION);
+	printk(KERN_INFO "OMAP DISPC rev %d.%d\n",
+	       FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+
+	enable_clocks(0);
+
+	return 0;
+}
+
+void dispc_exit(void)
+{
+	iounmap(dispc.base);
+}
+
+int dispc_enable_plane(enum omap_plane plane, bool enable)
+{
+	DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
+
+	enable_clocks(1);
+	_dispc_enable_plane(plane, enable);
+	enable_clocks(0);
+
+	return 0;
+}
+
+int dispc_setup_plane(enum omap_plane plane,
+		       u32 paddr, u16 screen_width,
+		       u16 pos_x, u16 pos_y,
+		       u16 width, u16 height,
+		       u16 out_width, u16 out_height,
+		       enum omap_color_mode color_mode,
+		       bool ilace,
+		       enum omap_dss_rotation_type rotation_type,
+		       u8 rotation, bool mirror, u8 global_alpha)
+{
+	int r = 0;
+
+	DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d,%d, %dx%d -> "
+	       "%dx%d, ilace %d, cmode %x, rot %d, mir %d\n",
+	       plane, paddr, screen_width, pos_x, pos_y,
+	       width, height,
+	       out_width, out_height,
+	       ilace, color_mode,
+	       rotation, mirror);
+
+	enable_clocks(1);
+
+	r = _dispc_setup_plane(plane,
+			   paddr, screen_width,
+			   pos_x, pos_y,
+			   width, height,
+			   out_width, out_height,
+			   color_mode, ilace,
+			   rotation_type,
+			   rotation, mirror,
+			   global_alpha);
+
+	enable_clocks(0);
+
+	return r;
+}
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
new file mode 100644
index 0000000..3b92b84
--- /dev/null
+++ b/drivers/video/omap2/dss/display.c
@@ -0,0 +1,671 @@
+/*
+ * linux/drivers/video/omap2/dss/display.c
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "DISPLAY"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+
+#include <plat/display.h>
+#include "dss.h"
+
+static LIST_HEAD(display_list);
+
+static ssize_t display_enabled_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+	bool enabled = dssdev->state != OMAP_DSS_DISPLAY_DISABLED;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", enabled);
+}
+
+static ssize_t display_enabled_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t size)
+{
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+	bool enabled;
+	int r;
+
+	enabled = simple_strtoul(buf, NULL, 10);
+
+	if (enabled != (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)) {
+		if (enabled) {
+			r = dssdev->enable(dssdev);
+			if (r)
+				return r;
+		} else {
+			dssdev->disable(dssdev);
+		}
+	}
+
+	return size;
+}
+
+static ssize_t display_upd_mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+	enum omap_dss_update_mode mode = OMAP_DSS_UPDATE_AUTO;
+	if (dssdev->get_update_mode)
+		mode = dssdev->get_update_mode(dssdev);
+	return snprintf(buf, PAGE_SIZE, "%d\n", mode);
+}
+
+static ssize_t display_upd_mode_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t size)
+{
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+	int val, r;
+	enum omap_dss_update_mode mode;
+
+	val = simple_strtoul(buf, NULL, 10);
+
+	switch (val) {
+	case OMAP_DSS_UPDATE_DISABLED:
+	case OMAP_DSS_UPDATE_AUTO:
+	case OMAP_DSS_UPDATE_MANUAL:
+		mode = (enum omap_dss_update_mode)val;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (!dssdev->set_update_mode)
+		return -ENOENT;
+
+	r = dssdev->set_update_mode(dssdev, mode);
+	if (r)
+		return r;
+
+	return size;
+}
+
+static ssize_t display_tear_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			dssdev->get_te ? dssdev->get_te(dssdev) : 0);
+}
+
+static ssize_t display_tear_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+	unsigned long te;
+	int r;
+
+	if (!dssdev->enable_te || !dssdev->get_te)
+		return -ENOENT;
+
+	te = simple_strtoul(buf, NULL, 0);
+
+	r = dssdev->enable_te(dssdev, te);
+	if (r)
+		return r;
+
+	return size;
+}
+
+static ssize_t display_timings_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+	struct omap_video_timings t;
+
+	if (!dssdev->get_timings)
+		return -ENOENT;
+
+	dssdev->get_timings(dssdev, &t);
+
+	return snprintf(buf, PAGE_SIZE, "%u,%u/%u/%u/%u,%u/%u/%u/%u\n",
+			t.pixel_clock,
+			t.x_res, t.hfp, t.hbp, t.hsw,
+			t.y_res, t.vfp, t.vbp, t.vsw);
+}
+
+static ssize_t display_timings_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+	struct omap_video_timings t;
+	int r, found;
+
+	if (!dssdev->set_timings || !dssdev->check_timings)
+		return -ENOENT;
+
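+	/* accepted formats: "pal" or "ntsc" (when VENC support is
+	 * compiled in), or "pck,xres/hfp/hbp/hsw,yres/vfp/vbp/vsw" as
+	 * printed by display_timings_show() */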
+	found = 0;
+#ifdef CONFIG_OMAP2_DSS_VENC
+	if (strncmp("pal", buf, 3) == 0) {
+		t = omap_dss_pal_timings;
+		found = 1;
+	} else if (strncmp("ntsc", buf, 4) == 0) {
+		t = omap_dss_ntsc_timings;
+		found = 1;
+	}
+#endif
+	if (!found && sscanf(buf, "%u,%hu/%hu/%hu/%hu,%hu/%hu/%hu/%hu",
+				&t.pixel_clock,
+				&t.x_res, &t.hfp, &t.hbp, &t.hsw,
+				&t.y_res, &t.vfp, &t.vbp, &t.vsw) != 9)
+		return -EINVAL;
+
+	r = dssdev->check_timings(dssdev, &t);
+	if (r)
+		return r;
+
+	dssdev->set_timings(dssdev, &t);
+
+	return size;
+}
+
+static ssize_t display_rotate_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+	int rotate;
+	if (!dssdev->get_rotate)
+		return -ENOENT;
+	rotate = dssdev->get_rotate(dssdev);
+	return snprintf(buf, PAGE_SIZE, "%u\n", rotate);
+}
+
+static ssize_t display_rotate_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+	unsigned long rot;
+	int r;
+
+	if (!dssdev->set_rotate || !dssdev->get_rotate)
+		return -ENOENT;
+
+	rot = simple_strtoul(buf, NULL, 0);
+
+	r = dssdev->set_rotate(dssdev, rot);
+	if (r)
+		return r;
+
+	return size;
+}
+
+static ssize_t display_mirror_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+	int mirror;
+	if (!dssdev->get_mirror)
+		return -ENOENT;
+	mirror = dssdev->get_mirror(dssdev);
+	return snprintf(buf, PAGE_SIZE, "%u\n", mirror);
+}
+
+static ssize_t display_mirror_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+	unsigned long mirror;
+	int r;
+
+	if (!dssdev->set_mirror || !dssdev->get_mirror)
+		return -ENOENT;
+
+	mirror = simple_strtoul(buf, NULL, 0);
+
+	r = dssdev->set_mirror(dssdev, mirror);
+	if (r)
+		return r;
+
+	return size;
+}
+
+static ssize_t display_wss_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+	unsigned int wss;
+
+	if (!dssdev->get_wss)
+		return -ENOENT;
+
+	wss = dssdev->get_wss(dssdev);
+
+	return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss);
+}
+
+static ssize_t display_wss_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+	unsigned long wss;
+	int r;
+
+	if (!dssdev->get_wss || !dssdev->set_wss)
+		return -ENOENT;
+
+	if (strict_strtoul(buf, 0, &wss))
+		return -EINVAL;
+
+	if (wss > 0xfffff)
+		return -EINVAL;
+
+	r = dssdev->set_wss(dssdev, wss);
+	if (r)
+		return r;
+
+	return size;
+}
+
+static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR,
+		display_enabled_show, display_enabled_store);
+static DEVICE_ATTR(update_mode, S_IRUGO|S_IWUSR,
+		display_upd_mode_show, display_upd_mode_store);
+static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR,
+		display_tear_show, display_tear_store);
+static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR,
+		display_timings_show, display_timings_store);
+static DEVICE_ATTR(rotate, S_IRUGO|S_IWUSR,
+		display_rotate_show, display_rotate_store);
+static DEVICE_ATTR(mirror, S_IRUGO|S_IWUSR,
+		display_mirror_show, display_mirror_store);
+static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR,
+		display_wss_show, display_wss_store);
+
+static struct device_attribute *display_sysfs_attrs[] = {
+	&dev_attr_enabled,
+	&dev_attr_update_mode,
+	&dev_attr_tear_elim,
+	&dev_attr_timings,
+	&dev_attr_rotate,
+	&dev_attr_mirror,
+	&dev_attr_wss,
+	NULL
+};
+
+static void default_get_resolution(struct omap_dss_device *dssdev,
+			u16 *xres, u16 *yres)
+{
+	*xres = dssdev->panel.timings.x_res;
+	*yres = dssdev->panel.timings.y_res;
+}
+
+void default_get_overlay_fifo_thresholds(enum omap_plane plane,
+		u32 fifo_size, enum omap_burst_size *burst_size,
+		u32 *fifo_low, u32 *fifo_high)
+{
+	unsigned burst_size_bytes;
+
+	*burst_size = OMAP_DSS_BURST_16x32;
+	burst_size_bytes = 16 * 32 / 8;
+
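+	/* low threshold: one burst below full, so a refill is requested
+	 * as soon as a whole burst fits; high threshold: one entry below
+	 * the FIFO size */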
+	*fifo_high = fifo_size - 1;
+	*fifo_low = fifo_size - burst_size_bytes;
+}
+
+static int default_wait_vsync(struct omap_dss_device *dssdev)
+{
+	unsigned long timeout = msecs_to_jiffies(500);
+	u32 irq;
+
+	if (dssdev->type == OMAP_DISPLAY_TYPE_VENC)
+		irq = DISPC_IRQ_EVSYNC_ODD;
+	else
+		irq = DISPC_IRQ_VSYNC;
+
+	return omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
+}
+
+static int default_get_recommended_bpp(struct omap_dss_device *dssdev)
+{
+	if (dssdev->panel.recommended_bpp)
+		return dssdev->panel.recommended_bpp;
+
+	switch (dssdev->type) {
+	case OMAP_DISPLAY_TYPE_DPI:
+		if (dssdev->phy.dpi.data_lines == 24)
+			return 24;
+		else
+			return 16;
+
+	case OMAP_DISPLAY_TYPE_DBI:
+	case OMAP_DISPLAY_TYPE_DSI:
+		if (dssdev->ctrl.pixel_size == 24)
+			return 24;
+		else
+			return 16;
+	case OMAP_DISPLAY_TYPE_VENC:
+	case OMAP_DISPLAY_TYPE_SDI:
+		return 24;
+	default:
+		BUG();
+	}
+}
+
+/* Checks if replication logic should be used. Replication is used only
+ * with active matrix displays, when the overlay is in RGB12U or RGB16
+ * mode and the LCD interface is 18bpp or 24bpp */
+bool dss_use_replication(struct omap_dss_device *dssdev,
+		enum omap_color_mode mode)
+{
+	int bpp;
+
+	if (mode != OMAP_DSS_COLOR_RGB12U && mode != OMAP_DSS_COLOR_RGB16)
+		return false;
+
+	if (dssdev->type == OMAP_DISPLAY_TYPE_DPI &&
+			(dssdev->panel.config & OMAP_DSS_LCD_TFT) == 0)
+		return false;
+
+	switch (dssdev->type) {
+	case OMAP_DISPLAY_TYPE_DPI:
+		bpp = dssdev->phy.dpi.data_lines;
+		break;
+	case OMAP_DISPLAY_TYPE_VENC:
+	case OMAP_DISPLAY_TYPE_SDI:
+		bpp = 24;
+		break;
+	case OMAP_DISPLAY_TYPE_DBI:
+	case OMAP_DISPLAY_TYPE_DSI:
+		bpp = dssdev->ctrl.pixel_size;
+		break;
+	default:
+		BUG();
+	}
+
+	return bpp > 16;
+}
+
+void dss_init_device(struct platform_device *pdev,
+		struct omap_dss_device *dssdev)
+{
+	struct device_attribute *attr;
+	int i;
+	int r;
+
+	switch (dssdev->type) {
+	case OMAP_DISPLAY_TYPE_DPI:
+#ifdef CONFIG_OMAP2_DSS_RFBI
+	case OMAP_DISPLAY_TYPE_DBI:
+#endif
+#ifdef CONFIG_OMAP2_DSS_SDI
+	case OMAP_DISPLAY_TYPE_SDI:
+#endif
+#ifdef CONFIG_OMAP2_DSS_DSI
+	case OMAP_DISPLAY_TYPE_DSI:
+#endif
+#ifdef CONFIG_OMAP2_DSS_VENC
+	case OMAP_DISPLAY_TYPE_VENC:
+#endif
+		break;
+	default:
+		DSSERR("Support for display '%s' not compiled in.\n",
+				dssdev->name);
+		return;
+	}
+
+	dssdev->get_resolution = default_get_resolution;
+	dssdev->get_recommended_bpp = default_get_recommended_bpp;
+	dssdev->wait_vsync = default_wait_vsync;
+
+	switch (dssdev->type) {
+	case OMAP_DISPLAY_TYPE_DPI:
+		r = dpi_init_display(dssdev);
+		break;
+#ifdef CONFIG_OMAP2_DSS_RFBI
+	case OMAP_DISPLAY_TYPE_DBI:
+		r = rfbi_init_display(dssdev);
+		break;
+#endif
+#ifdef CONFIG_OMAP2_DSS_VENC
+	case OMAP_DISPLAY_TYPE_VENC:
+		r = venc_init_display(dssdev);
+		break;
+#endif
+#ifdef CONFIG_OMAP2_DSS_SDI
+	case OMAP_DISPLAY_TYPE_SDI:
+		r = sdi_init_display(dssdev);
+		break;
+#endif
+#ifdef CONFIG_OMAP2_DSS_DSI
+	case OMAP_DISPLAY_TYPE_DSI:
+		r = dsi_init_display(dssdev);
+		break;
+#endif
+	default:
+		BUG();
+	}
+
+	if (r) {
+		DSSERR("failed to init display %s\n", dssdev->name);
+		return;
+	}
+
+	/* create device sysfs files */
+	i = 0;
+	while ((attr = display_sysfs_attrs[i++]) != NULL) {
+		r = device_create_file(&dssdev->dev, attr);
+		if (r)
+			DSSERR("failed to create sysfs file\n");
+	}
+
+	/* create display sysfs links */
+	r = sysfs_create_link(&pdev->dev.kobj, &dssdev->dev.kobj,
+			dev_name(&dssdev->dev));
+	if (r)
+		DSSERR("failed to create sysfs display link\n");
+}
+
+void dss_uninit_device(struct platform_device *pdev,
+		struct omap_dss_device *dssdev)
+{
+	struct device_attribute *attr;
+	int i = 0;
+
+	sysfs_remove_link(&pdev->dev.kobj, dev_name(&dssdev->dev));
+
+	while ((attr = display_sysfs_attrs[i++]) != NULL)
+		device_remove_file(&dssdev->dev, attr);
+
+	if (dssdev->manager)
+		dssdev->manager->unset_device(dssdev->manager);
+}
+
+static int dss_suspend_device(struct device *dev, void *data)
+{
+	int r;
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+
+	if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
+		dssdev->activate_after_resume = false;
+		return 0;
+	}
+
+	if (!dssdev->suspend) {
+		DSSERR("display '%s' doesn't implement suspend\n",
+				dssdev->name);
+		return -ENOSYS;
+	}
+
+	r = dssdev->suspend(dssdev);
+	if (r)
+		return r;
+
+	dssdev->activate_after_resume = true;
+
+	return 0;
+}
+
+int dss_suspend_all_devices(void)
+{
+	int r;
+	struct bus_type *bus = dss_get_bus();
+
+	r = bus_for_each_dev(bus, NULL, NULL, dss_suspend_device);
+	if (r) {
+		/* resume all displays that were suspended */
+		dss_resume_all_devices();
+		return r;
+	}
+
+	return 0;
+}
+
+static int dss_resume_device(struct device *dev, void *data)
+{
+	int r;
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+
+	if (dssdev->activate_after_resume && dssdev->resume) {
+		r = dssdev->resume(dssdev);
+		if (r)
+			return r;
+	}
+
+	dssdev->activate_after_resume = false;
+
+	return 0;
+}
+
+int dss_resume_all_devices(void)
+{
+	struct bus_type *bus = dss_get_bus();
+
+	return bus_for_each_dev(bus, NULL, NULL, dss_resume_device);
+}
+
+static int dss_disable_device(struct device *dev, void *data)
+{
+	struct omap_dss_device *dssdev = to_dss_device(dev);
+	dssdev->disable(dssdev);
+	return 0;
+}
+
+void dss_disable_all_devices(void)
+{
+	struct bus_type *bus = dss_get_bus();
+	bus_for_each_dev(bus, NULL, NULL, dss_disable_device);
+}
+
+void omap_dss_get_device(struct omap_dss_device *dssdev)
+{
+	get_device(&dssdev->dev);
+}
+EXPORT_SYMBOL(omap_dss_get_device);
+
+void omap_dss_put_device(struct omap_dss_device *dssdev)
+{
+	put_device(&dssdev->dev);
+}
+EXPORT_SYMBOL(omap_dss_put_device);
+
+/* ref count of the found device is incremented. ref count
+ * of from-device is decremented. */
+struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from)
+{
+	struct device *dev;
+	struct device *dev_start = NULL;
+	struct omap_dss_device *dssdev = NULL;
+
+	int match(struct device *dev, void *data)
+	{
+		/* skip panels connected to controllers */
+		if (to_dss_device(dev)->panel.ctrl)
+			return 0;
+
+		return 1;
+	}
+
+	if (from)
+		dev_start = &from->dev;
+	dev = bus_find_device(dss_get_bus(), dev_start, NULL, match);
+	if (dev)
+		dssdev = to_dss_device(dev);
+	if (from)
+		put_device(&from->dev);
+
+	return dssdev;
+}
+EXPORT_SYMBOL(omap_dss_get_next_device);
+
+struct omap_dss_device *omap_dss_find_device(void *data,
+		int (*match)(struct omap_dss_device *dssdev, void *data))
+{
+	struct omap_dss_device *dssdev = NULL;
+
+	while ((dssdev = omap_dss_get_next_device(dssdev)) != NULL) {
+		if (match(dssdev, data))
+			return dssdev;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(omap_dss_find_device);
+
+int omap_dss_start_device(struct omap_dss_device *dssdev)
+{
+	int r;
+
+	if (!dssdev->driver) {
+		DSSDBG("no driver\n");
+		r = -ENODEV;
+		goto err0;
+	}
+
+	if (dssdev->ctrl.panel && !dssdev->ctrl.panel->driver) {
+		DSSDBG("no panel driver\n");
+		r = -ENODEV;
+		goto err0;
+	}
+
+	if (!try_module_get(dssdev->dev.driver->owner)) {
+		r = -ENODEV;
+		goto err0;
+	}
+
+	if (dssdev->ctrl.panel) {
+		if (!try_module_get(dssdev->ctrl.panel->dev.driver->owner)) {
+			r = -ENODEV;
+			goto err1;
+		}
+	}
+
+	return 0;
+err1:
+	module_put(dssdev->dev.driver->owner);
+err0:
+	return r;
+}
+EXPORT_SYMBOL(omap_dss_start_device);
+
+void omap_dss_stop_device(struct omap_dss_device *dssdev)
+{
+	if (dssdev->ctrl.panel)
+		module_put(dssdev->ctrl.panel->dev.driver->owner);
+
+	module_put(dssdev->dev.driver->owner);
+}
+EXPORT_SYMBOL(omap_dss_stop_device);
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
new file mode 100644
index 0000000..2d71031
--- /dev/null
+++ b/drivers/video/omap2/dss/dpi.c
@@ -0,0 +1,399 @@
+/*
+ * linux/drivers/video/omap2/dss/dpi.c
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "DPI"
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+
+#include <plat/display.h>
+#include <plat/cpu.h>
+
+#include "dss.h"
+
+static struct {
+	int update_enabled;
+} dpi;
+
+#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
+static int dpi_set_dsi_clk(bool is_tft, unsigned long pck_req,
+		unsigned long *fck, int *lck_div, int *pck_div)
+{
+	struct dsi_clock_info dsi_cinfo;
+	struct dispc_clock_info dispc_cinfo;
+	int r;
+
+	r = dsi_pll_calc_clock_div_pck(is_tft, pck_req, &dsi_cinfo,
+			&dispc_cinfo);
+	if (r)
+		return r;
+
+	r = dsi_pll_set_clock_div(&dsi_cinfo);
+	if (r)
+		return r;
+
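+	/* from here on DISPC runs off the DSI1 PLL output instead of
+	 * dss1_alwon_fclk (see dispc_fclk_rate()) */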
+	dss_select_clk_source(0, 1);
+
+	r = dispc_set_clock_div(&dispc_cinfo);
+	if (r)
+		return r;
+
+	*fck = dsi_cinfo.dsi1_pll_fclk;
+	*lck_div = dispc_cinfo.lck_div;
+	*pck_div = dispc_cinfo.pck_div;
+
+	return 0;
+}
+#else
+static int dpi_set_dispc_clk(bool is_tft, unsigned long pck_req,
+		unsigned long *fck, int *lck_div, int *pck_div)
+{
+	struct dss_clock_info dss_cinfo;
+	struct dispc_clock_info dispc_cinfo;
+	int r;
+
+	r = dss_calc_clock_div(is_tft, pck_req, &dss_cinfo, &dispc_cinfo);
+	if (r)
+		return r;
+
+	r = dss_set_clock_div(&dss_cinfo);
+	if (r)
+		return r;
+
+	r = dispc_set_clock_div(&dispc_cinfo);
+	if (r)
+		return r;
+
+	*fck = dss_cinfo.fck;
+	*lck_div = dispc_cinfo.lck_div;
+	*pck_div = dispc_cinfo.pck_div;
+
+	return 0;
+}
+#endif
+
+static int dpi_set_mode(struct omap_dss_device *dssdev)
+{
+	struct omap_video_timings *t = &dssdev->panel.timings;
+	int lck_div, pck_div;
+	unsigned long fck;
+	unsigned long pck;
+	bool is_tft;
+	int r = 0;
+
+	dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+	dispc_set_pol_freq(dssdev->panel.config, dssdev->panel.acbi,
+			dssdev->panel.acb);
+
+	is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
+
+#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
+	r = dpi_set_dsi_clk(is_tft, t->pixel_clock * 1000,
+			&fck, &lck_div, &pck_div);
+#else
+	r = dpi_set_dispc_clk(is_tft, t->pixel_clock * 1000,
+			&fck, &lck_div, &pck_div);
+#endif
+	if (r)
+		goto err0;
+
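+	/* fck is in Hz, pixel_clock in kHz */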
+	pck = fck / lck_div / pck_div / 1000;
+
+	if (pck != t->pixel_clock) {
+		DSSWARN("Could not find exact pixel clock. "
+				"Requested %d kHz, got %lu kHz\n",
+				t->pixel_clock, pck);
+
+		t->pixel_clock = pck;
+	}
+
+	dispc_set_lcd_timings(t);
+
+err0:
+	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+	return r;
+}
+
+static int dpi_basic_init(struct omap_dss_device *dssdev)
+{
+	bool is_tft;
+
+	is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
+
+	dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_BYPASS);
+	dispc_set_lcd_display_type(is_tft ? OMAP_DSS_LCD_DISPLAY_TFT :
+			OMAP_DSS_LCD_DISPLAY_STN);
+	dispc_set_tft_data_lines(dssdev->phy.dpi.data_lines);
+
+	return 0;
+}
+
+static int dpi_display_enable(struct omap_dss_device *dssdev)
+{
+	int r;
+
+	r = omap_dss_start_device(dssdev);
+	if (r) {
+		DSSERR("failed to start device\n");
+		goto err0;
+	}
+
+	if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
+		DSSERR("display already enabled\n");
+		r = -EINVAL;
+		goto err1;
+	}
+
+	dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+	r = dpi_basic_init(dssdev);
+	if (r)
+		goto err2;
+
+#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
+	dss_clk_enable(DSS_CLK_FCK2);
+	r = dsi_pll_init(dssdev, 0, 1);
+	if (r)
+		goto err3;
+#endif
+	r = dpi_set_mode(dssdev);
+	if (r)
+		goto err4;
+
+	mdelay(2);
+
+	dispc_enable_lcd_out(1);
+
+	r = dssdev->driver->enable(dssdev);
+	if (r)
+		goto err5;
+
+	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+	return 0;
+
+err5:
+	dispc_enable_lcd_out(0);
+err4:
+#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
+	dsi_pll_uninit();
+err3:
+	dss_clk_disable(DSS_CLK_FCK2);
+#endif
+err2:
+	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+err1:
+	omap_dss_stop_device(dssdev);
+err0:
+	return r;
+}
+
+static int dpi_display_resume(struct omap_dss_device *dssdev);
+
+static void dpi_display_disable(struct omap_dss_device *dssdev)
+{
+	if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED)
+		return;
+
+	if (dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
+		dpi_display_resume(dssdev);
+
+	dssdev->driver->disable(dssdev);
+
+	dispc_enable_lcd_out(0);
+
+#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
+	dss_select_clk_source(0, 0);
+	dsi_pll_uninit();
+	dss_clk_disable(DSS_CLK_FCK2);
+#endif
+
+	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+	dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+
+	omap_dss_stop_device(dssdev);
+}
+
+static int dpi_display_suspend(struct omap_dss_device *dssdev)
+{
+	if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+		return -EINVAL;
+
+	DSSDBG("dpi_display_suspend\n");
+
+	if (dssdev->driver->suspend)
+		dssdev->driver->suspend(dssdev);
+
+	dispc_enable_lcd_out(0);
+
+	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+	dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+
+	return 0;
+}
+
+static int dpi_display_resume(struct omap_dss_device *dssdev)
+{
+	if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED)
+		return -EINVAL;
+
+	DSSDBG("dpi_display_resume\n");
+
+	dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+	dispc_enable_lcd_out(1);
+
+	if (dssdev->driver->resume)
+		dssdev->driver->resume(dssdev);
+
+	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+	return 0;
+}
+
+static void dpi_set_timings(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings)
+{
+	DSSDBG("dpi_set_timings\n");
+	dssdev->panel.timings = *timings;
+	if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+		dpi_set_mode(dssdev);
+		dispc_go(OMAP_DSS_CHANNEL_LCD);
+	}
+}
+
+static int dpi_check_timings(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings)
+{
+	bool is_tft;
+	int r;
+	int lck_div, pck_div;
+	unsigned long fck;
+	unsigned long pck;
+
+	if (!dispc_lcd_timings_ok(timings))
+		return -EINVAL;
+
+	if (timings->pixel_clock == 0)
+		return -EINVAL;
+
+	is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
+
+#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
+	{
+		struct dsi_clock_info dsi_cinfo;
+		struct dispc_clock_info dispc_cinfo;
+		r = dsi_pll_calc_clock_div_pck(is_tft,
+				timings->pixel_clock * 1000,
+				&dsi_cinfo, &dispc_cinfo);
+
+		if (r)
+			return r;
+
+		fck = dsi_cinfo.dsi1_pll_fclk;
+		lck_div = dispc_cinfo.lck_div;
+		pck_div = dispc_cinfo.pck_div;
+	}
+#else
+	{
+		struct dss_clock_info dss_cinfo;
+		struct dispc_clock_info dispc_cinfo;
+		r = dss_calc_clock_div(is_tft, timings->pixel_clock * 1000,
+				&dss_cinfo, &dispc_cinfo);
+
+		if (r)
+			return r;
+
+		fck = dss_cinfo.fck;
+		lck_div = dispc_cinfo.lck_div;
+		pck_div = dispc_cinfo.pck_div;
+	}
+#endif
+
+	pck = fck / lck_div / pck_div / 1000;
+
+	timings->pixel_clock = pck;
+
+	return 0;
+}
+
+static void dpi_get_timings(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings)
+{
+	*timings = dssdev->panel.timings;
+}
+
+static int dpi_display_set_update_mode(struct omap_dss_device *dssdev,
+		enum omap_dss_update_mode mode)
+{
+	if (mode == OMAP_DSS_UPDATE_MANUAL)
+		return -EINVAL;
+
+	if (mode == OMAP_DSS_UPDATE_DISABLED) {
+		dispc_enable_lcd_out(0);
+		dpi.update_enabled = 0;
+	} else {
+		dispc_enable_lcd_out(1);
+		dpi.update_enabled = 1;
+	}
+
+	return 0;
+}
+
+static enum omap_dss_update_mode dpi_display_get_update_mode(
+		struct omap_dss_device *dssdev)
+{
+	return dpi.update_enabled ? OMAP_DSS_UPDATE_AUTO :
+		OMAP_DSS_UPDATE_DISABLED;
+}
+
+int dpi_init_display(struct omap_dss_device *dssdev)
+{
+	DSSDBG("init_display\n");
+
+	dssdev->enable = dpi_display_enable;
+	dssdev->disable = dpi_display_disable;
+	dssdev->suspend = dpi_display_suspend;
+	dssdev->resume = dpi_display_resume;
+	dssdev->set_timings = dpi_set_timings;
+	dssdev->check_timings = dpi_check_timings;
+	dssdev->get_timings = dpi_get_timings;
+	dssdev->set_update_mode = dpi_display_set_update_mode;
+	dssdev->get_update_mode = dpi_display_get_update_mode;
+
+	return 0;
+}
+
+int dpi_init(void)
+{
+	return 0;
+}
+
+void dpi_exit(void)
+{
+}
+
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
new file mode 100644
index 0000000..5936487
--- /dev/null
+++ b/drivers/video/omap2/dss/dsi.c
@@ -0,0 +1,3710 @@
+/*
+ * linux/drivers/video/omap2/dss/dsi.c
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "DSI"
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/seq_file.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+
+#include <plat/display.h>
+#include <plat/clock.h>
+
+#include "dss.h"
+
+/*#define VERBOSE_IRQ*/
+#define DSI_CATCH_MISSING_TE
+
+#define DSI_BASE		0x4804FC00
+
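+/*
+ * Register offsets are wrapped in a struct so that only DSI register
+ * offsets can be passed to dsi_read_reg/dsi_write_reg.
+ */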
+struct dsi_reg { u16 idx; };
+
+#define DSI_REG(idx)		((const struct dsi_reg) { idx })
+
+#define DSI_SZ_REGS		SZ_1K
+/* DSI Protocol Engine */
+
+#define DSI_REVISION			DSI_REG(0x0000)
+#define DSI_SYSCONFIG			DSI_REG(0x0010)
+#define DSI_SYSSTATUS			DSI_REG(0x0014)
+#define DSI_IRQSTATUS			DSI_REG(0x0018)
+#define DSI_IRQENABLE			DSI_REG(0x001C)
+#define DSI_CTRL			DSI_REG(0x0040)
+#define DSI_COMPLEXIO_CFG1		DSI_REG(0x0048)
+#define DSI_COMPLEXIO_IRQ_STATUS	DSI_REG(0x004C)
+#define DSI_COMPLEXIO_IRQ_ENABLE	DSI_REG(0x0050)
+#define DSI_CLK_CTRL			DSI_REG(0x0054)
+#define DSI_TIMING1			DSI_REG(0x0058)
+#define DSI_TIMING2			DSI_REG(0x005C)
+#define DSI_VM_TIMING1			DSI_REG(0x0060)
+#define DSI_VM_TIMING2			DSI_REG(0x0064)
+#define DSI_VM_TIMING3			DSI_REG(0x0068)
+#define DSI_CLK_TIMING			DSI_REG(0x006C)
+#define DSI_TX_FIFO_VC_SIZE		DSI_REG(0x0070)
+#define DSI_RX_FIFO_VC_SIZE		DSI_REG(0x0074)
+#define DSI_COMPLEXIO_CFG2		DSI_REG(0x0078)
+#define DSI_RX_FIFO_VC_FULLNESS		DSI_REG(0x007C)
+#define DSI_VM_TIMING4			DSI_REG(0x0080)
+#define DSI_TX_FIFO_VC_EMPTINESS	DSI_REG(0x0084)
+#define DSI_VM_TIMING5			DSI_REG(0x0088)
+#define DSI_VM_TIMING6			DSI_REG(0x008C)
+#define DSI_VM_TIMING7			DSI_REG(0x0090)
+#define DSI_STOPCLK_TIMING		DSI_REG(0x0094)
+#define DSI_VC_CTRL(n)			DSI_REG(0x0100 + (n * 0x20))
+#define DSI_VC_TE(n)			DSI_REG(0x0104 + (n * 0x20))
+#define DSI_VC_LONG_PACKET_HEADER(n)	DSI_REG(0x0108 + (n * 0x20))
+#define DSI_VC_LONG_PACKET_PAYLOAD(n)	DSI_REG(0x010C + (n * 0x20))
+#define DSI_VC_SHORT_PACKET_HEADER(n)	DSI_REG(0x0110 + (n * 0x20))
+#define DSI_VC_IRQSTATUS(n)		DSI_REG(0x0118 + (n * 0x20))
+#define DSI_VC_IRQENABLE(n)		DSI_REG(0x011C + (n * 0x20))
+
+/* DSIPHY_SCP */
+
+#define DSI_DSIPHY_CFG0			DSI_REG(0x200 + 0x0000)
+#define DSI_DSIPHY_CFG1			DSI_REG(0x200 + 0x0004)
+#define DSI_DSIPHY_CFG2			DSI_REG(0x200 + 0x0008)
+#define DSI_DSIPHY_CFG5			DSI_REG(0x200 + 0x0014)
+
+/* DSI_PLL_CTRL_SCP */
+
+#define DSI_PLL_CONTROL			DSI_REG(0x300 + 0x0000)
+#define DSI_PLL_STATUS			DSI_REG(0x300 + 0x0004)
+#define DSI_PLL_GO			DSI_REG(0x300 + 0x0008)
+#define DSI_PLL_CONFIGURATION1		DSI_REG(0x300 + 0x000C)
+#define DSI_PLL_CONFIGURATION2		DSI_REG(0x300 + 0x0010)
+
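+/* read one register field, and read-modify-write one register field */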
+#define REG_GET(idx, start, end) \
+	FLD_GET(dsi_read_reg(idx), start, end)
+
+#define REG_FLD_MOD(idx, val, start, end) \
+	dsi_write_reg(idx, FLD_MOD(dsi_read_reg(idx), val, start, end))
+
+/* Global interrupts */
+#define DSI_IRQ_VC0		(1 << 0)
+#define DSI_IRQ_VC1		(1 << 1)
+#define DSI_IRQ_VC2		(1 << 2)
+#define DSI_IRQ_VC3		(1 << 3)
+#define DSI_IRQ_WAKEUP		(1 << 4)
+#define DSI_IRQ_RESYNC		(1 << 5)
+#define DSI_IRQ_PLL_LOCK	(1 << 7)
+#define DSI_IRQ_PLL_UNLOCK	(1 << 8)
+#define DSI_IRQ_PLL_RECALL	(1 << 9)
+#define DSI_IRQ_COMPLEXIO_ERR	(1 << 10)
+#define DSI_IRQ_HS_TX_TIMEOUT	(1 << 14)
+#define DSI_IRQ_LP_RX_TIMEOUT	(1 << 15)
+#define DSI_IRQ_TE_TRIGGER	(1 << 16)
+#define DSI_IRQ_ACK_TRIGGER	(1 << 17)
+#define DSI_IRQ_SYNC_LOST	(1 << 18)
+#define DSI_IRQ_LDO_POWER_GOOD	(1 << 19)
+#define DSI_IRQ_TA_TIMEOUT	(1 << 20)
+#define DSI_IRQ_ERROR_MASK \
+	(DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \
+	DSI_IRQ_TA_TIMEOUT)
+#define DSI_IRQ_CHANNEL_MASK	0xf
+
+/* Virtual channel interrupts */
+#define DSI_VC_IRQ_CS		(1 << 0)
+#define DSI_VC_IRQ_ECC_CORR	(1 << 1)
+#define DSI_VC_IRQ_PACKET_SENT	(1 << 2)
+#define DSI_VC_IRQ_FIFO_TX_OVF	(1 << 3)
+#define DSI_VC_IRQ_FIFO_RX_OVF	(1 << 4)
+#define DSI_VC_IRQ_BTA		(1 << 5)
+#define DSI_VC_IRQ_ECC_NO_CORR	(1 << 6)
+#define DSI_VC_IRQ_FIFO_TX_UDF	(1 << 7)
+#define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8)
+#define DSI_VC_IRQ_ERROR_MASK \
+	(DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \
+	DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \
+	DSI_VC_IRQ_FIFO_TX_UDF)
+
+/* ComplexIO interrupts */
+#define DSI_CIO_IRQ_ERRSYNCESC1		(1 << 0)
+#define DSI_CIO_IRQ_ERRSYNCESC2		(1 << 1)
+#define DSI_CIO_IRQ_ERRSYNCESC3		(1 << 2)
+#define DSI_CIO_IRQ_ERRESC1		(1 << 5)
+#define DSI_CIO_IRQ_ERRESC2		(1 << 6)
+#define DSI_CIO_IRQ_ERRESC3		(1 << 7)
+#define DSI_CIO_IRQ_ERRCONTROL1		(1 << 10)
+#define DSI_CIO_IRQ_ERRCONTROL2		(1 << 11)
+#define DSI_CIO_IRQ_ERRCONTROL3		(1 << 12)
+#define DSI_CIO_IRQ_STATEULPS1		(1 << 15)
+#define DSI_CIO_IRQ_STATEULPS2		(1 << 16)
+#define DSI_CIO_IRQ_STATEULPS3		(1 << 17)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1	(1 << 20)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1	(1 << 21)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2	(1 << 22)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2	(1 << 23)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3	(1 << 24)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3	(1 << 25)
+#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0	(1 << 30)
+#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1	(1 << 31)
+
+#define DSI_DT_DCS_SHORT_WRITE_0	0x05
+#define DSI_DT_DCS_SHORT_WRITE_1	0x15
+#define DSI_DT_DCS_READ			0x06
+#define DSI_DT_SET_MAX_RET_PKG_SIZE	0x37
+#define DSI_DT_NULL_PACKET		0x09
+#define DSI_DT_DCS_LONG_WRITE		0x39
+
+#define DSI_DT_RX_ACK_WITH_ERR		0x02
+#define DSI_DT_RX_DCS_LONG_READ		0x1c
+#define DSI_DT_RX_SHORT_READ_1		0x21
+#define DSI_DT_RX_SHORT_READ_2		0x22
+
+#define FINT_MAX 2100000
+#define FINT_MIN 750000
+#define REGN_MAX (1 << 7)
+#define REGM_MAX ((1 << 11) - 1)
+#define REGM3_MAX (1 << 4)
+#define REGM4_MAX (1 << 4)
+#define LP_DIV_MAX ((1 << 13) - 1)
+
+enum fifo_size {
+	DSI_FIFO_SIZE_0		= 0,
+	DSI_FIFO_SIZE_32	= 1,
+	DSI_FIFO_SIZE_64	= 2,
+	DSI_FIFO_SIZE_96	= 3,
+	DSI_FIFO_SIZE_128	= 4,
+};
+
+enum dsi_vc_mode {
+	DSI_VC_MODE_L4 = 0,
+	DSI_VC_MODE_VP,
+};
+
+struct dsi_update_region {
+	bool dirty;
+	u16 x, y, w, h;
+	struct omap_dss_device *device;
+};
+
+static struct
+{
+	void __iomem	*base;
+
+	struct dsi_clock_info current_cinfo;
+
+	struct regulator *vdds_dsi_reg;
+
+	struct {
+		enum dsi_vc_mode mode;
+		struct omap_dss_device *dssdev;
+		enum fifo_size fifo_size;
+		int dest_per;	/* destination peripheral 0-3 */
+	} vc[4];
+
+	struct mutex lock;
+	struct mutex bus_lock;
+
+	unsigned pll_locked;
+
+	struct completion bta_completion;
+
+	struct task_struct *thread;
+	wait_queue_head_t waitqueue;
+
+	spinlock_t update_lock;
+	bool framedone_received;
+	struct dsi_update_region update_region;
+	struct dsi_update_region active_update_region;
+	struct completion update_completion;
+
+	enum omap_dss_update_mode user_update_mode;
+	enum omap_dss_update_mode update_mode;
+	bool te_enabled;
+	bool use_ext_te;
+
+#ifdef DSI_CATCH_MISSING_TE
+	struct timer_list te_timer;
+#endif
+
+	unsigned long cache_req_pck;
+	unsigned long cache_clk_freq;
+	struct dsi_clock_info cache_cinfo;
+
+	u32		errors;
+	spinlock_t	errors_lock;
+#ifdef DEBUG
+	ktime_t perf_setup_time;
+	ktime_t perf_start_time;
+	ktime_t perf_start_time_auto;
+	int perf_measure_frames;
+#endif
+	int debug_read;
+	int debug_write;
+} dsi;
+
+#ifdef DEBUG
+static bool dsi_perf;
+module_param_named(dsi_perf, dsi_perf, bool, 0644);
+#endif
+
+static inline void dsi_write_reg(const struct dsi_reg idx, u32 val)
+{
+	__raw_writel(val, dsi.base + idx.idx);
+}
+
+static inline u32 dsi_read_reg(const struct dsi_reg idx)
+{
+	return __raw_readl(dsi.base + idx.idx);
+}
+
+void dsi_save_context(void)
+{
+}
+
+void dsi_restore_context(void)
+{
+}
+
+void dsi_bus_lock(void)
+{
+	mutex_lock(&dsi.bus_lock);
+}
+EXPORT_SYMBOL(dsi_bus_lock);
+
+void dsi_bus_unlock(void)
+{
+	mutex_unlock(&dsi.bus_lock);
+}
+EXPORT_SYMBOL(dsi_bus_unlock);
+
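+/*
+ * Busy-wait until the given register bit has the wanted value. Returns
+ * the wanted value on success, and its complement if the bit does not
+ * change within the polling budget.
+ */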
+static inline int wait_for_bit_change(const struct dsi_reg idx, int bitnum,
+		int value)
+{
+	int t = 100000;
+
+	while (REG_GET(idx, bitnum, bitnum) != value) {
+		if (--t == 0)
+			return !value;
+	}
+
+	return value;
+}
+
+#ifdef DEBUG
+static void dsi_perf_mark_setup(void)
+{
+	dsi.perf_setup_time = ktime_get();
+}
+
+static void dsi_perf_mark_start(void)
+{
+	dsi.perf_start_time = ktime_get();
+}
+
+static void dsi_perf_mark_start_auto(void)
+{
+	dsi.perf_measure_frames = 0;
+	dsi.perf_start_time_auto = ktime_get();
+}
+
+static void dsi_perf_show(const char *name)
+{
+	ktime_t t, setup_time, trans_time;
+	u32 total_bytes;
+	u32 setup_us, trans_us, total_us;
+
+	if (!dsi_perf)
+		return;
+
+	if (dsi.update_mode == OMAP_DSS_UPDATE_DISABLED)
+		return;
+
+	t = ktime_get();
+
+	setup_time = ktime_sub(dsi.perf_start_time, dsi.perf_setup_time);
+	setup_us = (u32)ktime_to_us(setup_time);
+	if (setup_us == 0)
+		setup_us = 1;
+
+	trans_time = ktime_sub(t, dsi.perf_start_time);
+	trans_us = (u32)ktime_to_us(trans_time);
+	if (trans_us == 0)
+		trans_us = 1;
+
+	total_us = setup_us + trans_us;
+
+	total_bytes = dsi.active_update_region.w *
+		dsi.active_update_region.h *
+		dsi.active_update_region.device->ctrl.pixel_size / 8;
+
+	if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO) {
+		static u32 s_total_trans_us, s_total_setup_us;
+		static u32 s_min_trans_us = 0xffffffff, s_min_setup_us;
+		static u32 s_max_trans_us, s_max_setup_us;
+		const int numframes = 100;
+		ktime_t total_time_auto;
+		u32 total_time_auto_us;
+
+		dsi.perf_measure_frames++;
+
+		if (setup_us < s_min_setup_us)
+			s_min_setup_us = setup_us;
+
+		if (setup_us > s_max_setup_us)
+			s_max_setup_us = setup_us;
+
+		s_total_setup_us += setup_us;
+
+		if (trans_us < s_min_trans_us)
+			s_min_trans_us = trans_us;
+
+		if (trans_us > s_max_trans_us)
+			s_max_trans_us = trans_us;
+
+		s_total_trans_us += trans_us;
+
+		if (dsi.perf_measure_frames < numframes)
+			return;
+
+		total_time_auto = ktime_sub(t, dsi.perf_start_time_auto);
+		total_time_auto_us = (u32)ktime_to_us(total_time_auto);
+
+		printk(KERN_INFO "DSI(%s): %u fps, setup %u/%u/%u, "
+				"trans %u/%u/%u\n",
+				name,
+				1000 * 1000 * numframes / total_time_auto_us,
+				s_min_setup_us,
+				s_max_setup_us,
+				s_total_setup_us / numframes,
+				s_min_trans_us,
+				s_max_trans_us,
+				s_total_trans_us / numframes);
+
+		s_total_setup_us = 0;
+		s_min_setup_us = 0xffffffff;
+		s_max_setup_us = 0;
+		s_total_trans_us = 0;
+		s_min_trans_us = 0xffffffff;
+		s_max_trans_us = 0;
+		dsi_perf_mark_start_auto();
+	} else {
+		printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), "
+				"%u bytes, %u kbytes/sec\n",
+				name,
+				setup_us,
+				trans_us,
+				total_us,
+				1000*1000 / total_us,
+				total_bytes,
+				total_bytes * 1000 / total_us);
+	}
+}
+#else
+#define dsi_perf_mark_setup()
+#define dsi_perf_mark_start()
+#define dsi_perf_mark_start_auto()
+#define dsi_perf_show(x)
+#endif
+
+static void print_irq_status(u32 status)
+{
+#ifndef VERBOSE_IRQ
+	if ((status & ~DSI_IRQ_CHANNEL_MASK) == 0)
+		return;
+#endif
+	printk(KERN_DEBUG "DSI IRQ: 0x%x: ", status);
+
+#define PIS(x) \
+	if (status & DSI_IRQ_##x) \
+		printk(#x " ");
+#ifdef VERBOSE_IRQ
+	PIS(VC0);
+	PIS(VC1);
+	PIS(VC2);
+	PIS(VC3);
+#endif
+	PIS(WAKEUP);
+	PIS(RESYNC);
+	PIS(PLL_LOCK);
+	PIS(PLL_UNLOCK);
+	PIS(PLL_RECALL);
+	PIS(COMPLEXIO_ERR);
+	PIS(HS_TX_TIMEOUT);
+	PIS(LP_RX_TIMEOUT);
+	PIS(TE_TRIGGER);
+	PIS(ACK_TRIGGER);
+	PIS(SYNC_LOST);
+	PIS(LDO_POWER_GOOD);
+	PIS(TA_TIMEOUT);
+#undef PIS
+
+	printk("\n");
+}
+
+static void print_irq_status_vc(int channel, u32 status)
+{
+#ifndef VERBOSE_IRQ
+	if ((status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
+		return;
+#endif
+	printk(KERN_DEBUG "DSI VC(%d) IRQ 0x%x: ", channel, status);
+
+#define PIS(x) \
+	if (status & DSI_VC_IRQ_##x) \
+		printk(#x " ");
+	PIS(CS);
+	PIS(ECC_CORR);
+#ifdef VERBOSE_IRQ
+	PIS(PACKET_SENT);
+#endif
+	PIS(FIFO_TX_OVF);
+	PIS(FIFO_RX_OVF);
+	PIS(BTA);
+	PIS(ECC_NO_CORR);
+	PIS(FIFO_TX_UDF);
+	PIS(PP_BUSY_CHANGE);
+#undef PIS
+	printk("\n");
+}
+
+static void print_irq_status_cio(u32 status)
+{
+	printk(KERN_DEBUG "DSI CIO IRQ 0x%x: ", status);
+
+#define PIS(x) \
+	if (status & DSI_CIO_IRQ_##x) \
+		printk(#x " ");
+	PIS(ERRSYNCESC1);
+	PIS(ERRSYNCESC2);
+	PIS(ERRSYNCESC3);
+	PIS(ERRESC1);
+	PIS(ERRESC2);
+	PIS(ERRESC3);
+	PIS(ERRCONTROL1);
+	PIS(ERRCONTROL2);
+	PIS(ERRCONTROL3);
+	PIS(STATEULPS1);
+	PIS(STATEULPS2);
+	PIS(STATEULPS3);
+	PIS(ERRCONTENTIONLP0_1);
+	PIS(ERRCONTENTIONLP1_1);
+	PIS(ERRCONTENTIONLP0_2);
+	PIS(ERRCONTENTIONLP1_2);
+	PIS(ERRCONTENTIONLP0_3);
+	PIS(ERRCONTENTIONLP1_3);
+	PIS(ULPSACTIVENOT_ALL0);
+	PIS(ULPSACTIVENOT_ALL1);
+#undef PIS
+
+	printk("\n");
+}
+
+static int debug_irq;
+
+/* called from dss */
+void dsi_irq_handler(void)
+{
+	u32 irqstatus, vcstatus, ciostatus;
+	int i;
+
+	irqstatus = dsi_read_reg(DSI_IRQSTATUS);
+
+	if (irqstatus & DSI_IRQ_ERROR_MASK) {
+		DSSERR("DSI error, irqstatus %x\n", irqstatus);
+		print_irq_status(irqstatus);
+		spin_lock(&dsi.errors_lock);
+		dsi.errors |= irqstatus & DSI_IRQ_ERROR_MASK;
+		spin_unlock(&dsi.errors_lock);
+	} else if (debug_irq) {
+		print_irq_status(irqstatus);
+	}
+
+#ifdef DSI_CATCH_MISSING_TE
+	if (irqstatus & DSI_IRQ_TE_TRIGGER)
+		del_timer(&dsi.te_timer);
+#endif
+
+	for (i = 0; i < 4; ++i) {
+		if ((irqstatus & (1<<i)) == 0)
+			continue;
+
+		vcstatus = dsi_read_reg(DSI_VC_IRQSTATUS(i));
+
+		if (vcstatus & DSI_VC_IRQ_BTA)
+			complete(&dsi.bta_completion);
+
+		if (vcstatus & DSI_VC_IRQ_ERROR_MASK) {
+			DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
+				       i, vcstatus);
+			print_irq_status_vc(i, vcstatus);
+		} else if (debug_irq) {
+			print_irq_status_vc(i, vcstatus);
+		}
+
+		dsi_write_reg(DSI_VC_IRQSTATUS(i), vcstatus);
+		/* flush posted write */
+		dsi_read_reg(DSI_VC_IRQSTATUS(i));
+	}
+
+	if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
+		ciostatus = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
+
+		dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
+		/* flush posted write */
+		dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
+
+		DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
+		print_irq_status_cio(ciostatus);
+	}
+
+	dsi_write_reg(DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
+	/* flush posted write */
+	dsi_read_reg(DSI_IRQSTATUS);
+}
+
+static void _dsi_initialize_irq(void)
+{
+	u32 l;
+	int i;
+
+	/* disable all interrupts */
+	dsi_write_reg(DSI_IRQENABLE, 0);
+	for (i = 0; i < 4; ++i)
+		dsi_write_reg(DSI_VC_IRQENABLE(i), 0);
+	dsi_write_reg(DSI_COMPLEXIO_IRQ_ENABLE, 0);
+
+	/* clear interrupt status */
+	l = dsi_read_reg(DSI_IRQSTATUS);
+	dsi_write_reg(DSI_IRQSTATUS, l & ~DSI_IRQ_CHANNEL_MASK);
+
+	for (i = 0; i < 4; ++i) {
+		l = dsi_read_reg(DSI_VC_IRQSTATUS(i));
+		dsi_write_reg(DSI_VC_IRQSTATUS(i), l);
+	}
+
+	l = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
+	dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, l);
+
+	/* enable error irqs */
+	l = DSI_IRQ_ERROR_MASK;
+#ifdef DSI_CATCH_MISSING_TE
+	l |= DSI_IRQ_TE_TRIGGER;
+#endif
+	dsi_write_reg(DSI_IRQENABLE, l);
+
+	l = DSI_VC_IRQ_ERROR_MASK;
+	for (i = 0; i < 4; ++i)
+		dsi_write_reg(DSI_VC_IRQENABLE(i), l);
+
+	/* XXX zonda responds incorrectly, causing control error:
+	   Exit from LP-ESC mode to LP11 uses wrong transition states on the
+	   data lines LP0 and LN0. */
+	dsi_write_reg(DSI_COMPLEXIO_IRQ_ENABLE,
+			-1 & (~DSI_CIO_IRQ_ERRCONTROL2));
+}
+
+static u32 dsi_get_errors(void)
+{
+	unsigned long flags;
+	u32 e;
+	spin_lock_irqsave(&dsi.errors_lock, flags);
+	e = dsi.errors;
+	dsi.errors = 0;
+	spin_unlock_irqrestore(&dsi.errors_lock, flags);
+	return e;
+}
+
+static void dsi_vc_enable_bta_irq(int channel)
+{
+	u32 l;
+
+	dsi_write_reg(DSI_VC_IRQSTATUS(channel), DSI_VC_IRQ_BTA);
+
+	l = dsi_read_reg(DSI_VC_IRQENABLE(channel));
+	l |= DSI_VC_IRQ_BTA;
+	dsi_write_reg(DSI_VC_IRQENABLE(channel), l);
+}
+
+static void dsi_vc_disable_bta_irq(int channel)
+{
+	u32 l;
+
+	l = dsi_read_reg(DSI_VC_IRQENABLE(channel));
+	l &= ~DSI_VC_IRQ_BTA;
+	dsi_write_reg(DSI_VC_IRQENABLE(channel), l);
+}
+
+/* DSI func clock. this could also be DSI2_PLL_FCLK */
+static inline void enable_clocks(bool enable)
+{
+	if (enable)
+		dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+	else
+		dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+}
+
+/* source clock for DSI PLL. this could also be PCLKFREE */
+static inline void dsi_enable_pll_clock(bool enable)
+{
+	if (enable)
+		dss_clk_enable(DSS_CLK_FCK2);
+	else
+		dss_clk_disable(DSS_CLK_FCK2);
+
+	if (enable && dsi.pll_locked) {
+		if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1)
+			DSSERR("cannot lock PLL when enabling clocks\n");
+	}
+}
+
+#ifdef DEBUG
+static void _dsi_print_reset_status(void)
+{
+	u32 l;
+
+	if (!dss_debug)
+		return;
+
+	/* A dummy read using the SCP interface to any DSIPHY register is
+	 * required after DSIPHY reset to complete the reset of the DSI complex
+	 * I/O. */
+	l = dsi_read_reg(DSI_DSIPHY_CFG5);
+
+	printk(KERN_DEBUG "DSI resets: ");
+
+	l = dsi_read_reg(DSI_PLL_STATUS);
+	printk("PLL (%d) ", FLD_GET(l, 0, 0));
+
+	l = dsi_read_reg(DSI_COMPLEXIO_CFG1);
+	printk("CIO (%d) ", FLD_GET(l, 29, 29));
+
+	l = dsi_read_reg(DSI_DSIPHY_CFG5);
+	printk("PHY (%x, %d, %d, %d)\n",
+			FLD_GET(l, 28, 26),
+			FLD_GET(l, 29, 29),
+			FLD_GET(l, 30, 30),
+			FLD_GET(l, 31, 31));
+}
+#else
+#define _dsi_print_reset_status()
+#endif
+
+static inline int dsi_if_enable(bool enable)
+{
+	DSSDBG("dsi_if_enable(%d)\n", enable);
+
+	enable = enable ? 1 : 0;
+	REG_FLD_MOD(DSI_CTRL, enable, 0, 0); /* IF_EN */
+
+	if (wait_for_bit_change(DSI_CTRL, 0, enable) != enable) {
+		DSSERR("Failed to set dsi_if_enable to %d\n", enable);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+unsigned long dsi_get_dsi1_pll_rate(void)
+{
+	return dsi.current_cinfo.dsi1_pll_fclk;
+}
+
+static unsigned long dsi_get_dsi2_pll_rate(void)
+{
+	return dsi.current_cinfo.dsi2_pll_fclk;
+}
+
+static unsigned long dsi_get_txbyteclkhs(void)
+{
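+	/* HS TX byte clock: lane bit rate (CLKIN4DDR / 2) divided by 8 */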
+	return dsi.current_cinfo.clkin4ddr / 16;
+}
+
+static unsigned long dsi_fclk_rate(void)
+{
+	unsigned long r;
+
+	if (dss_get_dsi_clk_source() == 0) {
+		/* DSI FCLK source is DSS1_ALWON_FCK, which is dss1_fck */
+		r = dss_clk_get_rate(DSS_CLK_FCK1);
+	} else {
+		/* DSI FCLK source is DSI2_PLL_FCLK */
+		r = dsi_get_dsi2_pll_rate();
+	}
+
+	return r;
+}
+
+static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev)
+{
+	unsigned long dsi_fclk;
+	unsigned lp_clk_div;
+	unsigned long lp_clk;
+
+	lp_clk_div = dssdev->phy.dsi.div.lp_clk_div;
+
+	if (lp_clk_div == 0 || lp_clk_div > LP_DIV_MAX)
+		return -EINVAL;
+
+	dsi_fclk = dsi_fclk_rate();
+
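+	/* LP_CLK = DSI_FCLK / (2 * LP_CLK_DIVISOR) */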
+	lp_clk = dsi_fclk / 2 / lp_clk_div;
+
+	DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk);
+	dsi.current_cinfo.lp_clk = lp_clk;
+	dsi.current_cinfo.lp_clk_div = lp_clk_div;
+
+	REG_FLD_MOD(DSI_CLK_CTRL, lp_clk_div, 12, 0);   /* LP_CLK_DIVISOR */
+
+	REG_FLD_MOD(DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0,
+			21, 21);		/* LP_RX_SYNCHRO_ENABLE */
+
+	return 0;
+}
+
+enum dsi_pll_power_state {
+	DSI_PLL_POWER_OFF	= 0x0,
+	DSI_PLL_POWER_ON_HSCLK	= 0x1,
+	DSI_PLL_POWER_ON_ALL	= 0x2,
+	DSI_PLL_POWER_ON_DIV	= 0x3,
+};
+
+static int dsi_pll_power(enum dsi_pll_power_state state)
+{
+	int t = 0;
+
+	REG_FLD_MOD(DSI_CLK_CTRL, state, 31, 30);	/* PLL_PWR_CMD */
+
+	/* PLL_PWR_STATUS */
+	while (FLD_GET(dsi_read_reg(DSI_CLK_CTRL), 29, 28) != state) {
+		udelay(1);
+		if (t++ > 1000) {
+			DSSERR("Failed to set DSI PLL power mode to %d\n",
+					state);
+			return -ENODEV;
+		}
+	}
+
+	return 0;
+}
+
+/* calculate clock rates using dividers in cinfo */
+static int dsi_calc_clock_rates(struct dsi_clock_info *cinfo)
+{
+	if (cinfo->regn == 0 || cinfo->regn > REGN_MAX)
+		return -EINVAL;
+
+	if (cinfo->regm == 0 || cinfo->regm > REGM_MAX)
+		return -EINVAL;
+
+	if (cinfo->regm3 > REGM3_MAX)
+		return -EINVAL;
+
+	if (cinfo->regm4 > REGM4_MAX)
+		return -EINVAL;
+
+	if (cinfo->use_dss2_fck) {
+		cinfo->clkin = dss_clk_get_rate(DSS_CLK_FCK2);
+		/* XXX it is unclear if highfreq should be used
+		 * with DSS2_FCK source also */
+		cinfo->highfreq = 0;
+	} else {
+		cinfo->clkin = dispc_pclk_rate();
+
+		if (cinfo->clkin < 32000000)
+			cinfo->highfreq = 0;
+		else
+			cinfo->highfreq = 1;
+	}
+
+	cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1));
+
+	if (cinfo->fint > FINT_MAX || cinfo->fint < FINT_MIN)
+		return -EINVAL;
+
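+	/* CLKIN4DDR = 2 * REGM * Fint, and must stay at or below 1.8 GHz */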
+	cinfo->clkin4ddr = 2 * cinfo->regm * cinfo->fint;
+
+	if (cinfo->clkin4ddr > 1800 * 1000 * 1000)
+		return -EINVAL;
+
+	if (cinfo->regm3 > 0)
+		cinfo->dsi1_pll_fclk = cinfo->clkin4ddr / cinfo->regm3;
+	else
+		cinfo->dsi1_pll_fclk = 0;
+
+	if (cinfo->regm4 > 0)
+		cinfo->dsi2_pll_fclk = cinfo->clkin4ddr / cinfo->regm4;
+	else
+		cinfo->dsi2_pll_fclk = 0;
+
+	return 0;
+}
+
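+/*
+ * Exhaustively search the REGN/REGM/REGM3 divider space for the PLL
+ * configuration whose resulting DISPC pixel clock is closest to req_pck.
+ * The best result is cached, keyed on req_pck and the DSS2_FCK rate.
+ */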
+int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck,
+		struct dsi_clock_info *dsi_cinfo,
+		struct dispc_clock_info *dispc_cinfo)
+{
+	struct dsi_clock_info cur, best;
+	struct dispc_clock_info best_dispc;
+	int min_fck_per_pck;
+	int match = 0;
+	unsigned long dss_clk_fck2;
+
+	dss_clk_fck2 = dss_clk_get_rate(DSS_CLK_FCK2);
+
+	if (req_pck == dsi.cache_req_pck &&
+			dsi.cache_cinfo.clkin == dss_clk_fck2) {
+		DSSDBG("DSI clock info found from cache\n");
+		*dsi_cinfo = dsi.cache_cinfo;
+		dispc_find_clk_divs(is_tft, req_pck, dsi_cinfo->dsi1_pll_fclk,
+				dispc_cinfo);
+		return 0;
+	}
+
+	min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
+
+	if (min_fck_per_pck &&
+		req_pck * min_fck_per_pck > DISPC_MAX_FCK) {
+		DSSERR("Requested pixel clock not possible with the current "
+				"OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
+				"the constraint off.\n");
+		min_fck_per_pck = 0;
+	}
+
+	DSSDBG("dsi_pll_calc\n");
+
+retry:
+	memset(&best, 0, sizeof(best));
+	memset(&best_dispc, 0, sizeof(best_dispc));
+
+	memset(&cur, 0, sizeof(cur));
+	cur.clkin = dss_clk_fck2;
+	cur.use_dss2_fck = 1;
+	cur.highfreq = 0;
+
+	/* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */
+	/* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */
+	/* To reduce PLL lock time, keep Fint high (around 2 MHz) */
+	for (cur.regn = 1; cur.regn < REGN_MAX; ++cur.regn) {
+		if (cur.highfreq == 0)
+			cur.fint = cur.clkin / cur.regn;
+		else
+			cur.fint = cur.clkin / (2 * cur.regn);
+
+		if (cur.fint > FINT_MAX || cur.fint < FINT_MIN)
+			continue;
+
+		/* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */
+		for (cur.regm = 1; cur.regm < REGM_MAX; ++cur.regm) {
+			unsigned long a, b;
+
+			a = 2 * cur.regm * (cur.clkin/1000);
+			b = cur.regn * (cur.highfreq + 1);
+			cur.clkin4ddr = a / b * 1000;
+
+			if (cur.clkin4ddr > 1800 * 1000 * 1000)
+				break;
+
+			/* DSI1_PLL_FCLK(MHz) = DSIPHY(MHz) / regm3  < 173MHz */
+			for (cur.regm3 = 1; cur.regm3 < REGM3_MAX;
+					++cur.regm3) {
+				struct dispc_clock_info cur_dispc;
+				cur.dsi1_pll_fclk = cur.clkin4ddr / cur.regm3;
+
+				/* this will narrow down the search a bit,
+				 * but still give pixclocks below what was
+				 * requested */
+				if (cur.dsi1_pll_fclk < req_pck)
+					break;
+
+				if (cur.dsi1_pll_fclk > DISPC_MAX_FCK)
+					continue;
+
+				if (min_fck_per_pck &&
+					cur.dsi1_pll_fclk <
+						req_pck * min_fck_per_pck)
+					continue;
+
+				match = 1;
+
+				dispc_find_clk_divs(is_tft, req_pck,
+						cur.dsi1_pll_fclk,
+						&cur_dispc);
+
+				if (abs(cur_dispc.pck - req_pck) <
+						abs(best_dispc.pck - req_pck)) {
+					best = cur;
+					best_dispc = cur_dispc;
+
+					if (cur_dispc.pck == req_pck)
+						goto found;
+				}
+			}
+		}
+	}
+found:
+	if (!match) {
+		if (min_fck_per_pck) {
+			DSSERR("Could not find suitable clock settings.\n"
+					"Turning FCK/PCK constraint off and "
+					"trying again.\n");
+			min_fck_per_pck = 0;
+			goto retry;
+		}
+
+		DSSERR("Could not find suitable clock settings.\n");
+
+		return -EINVAL;
+	}
+
+	/* DSI2_PLL_FCLK (regm4) is not used */
+	best.regm4 = 0;
+	best.dsi2_pll_fclk = 0;
+
+	if (dsi_cinfo)
+		*dsi_cinfo = best;
+	if (dispc_cinfo)
+		*dispc_cinfo = best_dispc;
+
+	dsi.cache_req_pck = req_pck;
+	dsi.cache_clk_freq = 0;
+	dsi.cache_cinfo = best;
+
+	return 0;
+}
+
+int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
+{
+	int r = 0;
+	u32 l;
+	int f;
+
+	DSSDBGF();
+
+	dsi.current_cinfo.fint = cinfo->fint;
+	dsi.current_cinfo.clkin4ddr = cinfo->clkin4ddr;
+	dsi.current_cinfo.dsi1_pll_fclk = cinfo->dsi1_pll_fclk;
+	dsi.current_cinfo.dsi2_pll_fclk = cinfo->dsi2_pll_fclk;
+
+	dsi.current_cinfo.regn = cinfo->regn;
+	dsi.current_cinfo.regm = cinfo->regm;
+	dsi.current_cinfo.regm3 = cinfo->regm3;
+	dsi.current_cinfo.regm4 = cinfo->regm4;
+
+	DSSDBG("DSI Fint %ld\n", cinfo->fint);
+
+	DSSDBG("clkin (%s) rate %ld, highfreq %d\n",
+			cinfo->use_dss2_fck ? "dss2_fck" : "pclkfree",
+			cinfo->clkin,
+			cinfo->highfreq);
+
+	/* DSIPHY == CLKIN4DDR */
+	DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu / %d = %lu\n",
+			cinfo->regm,
+			cinfo->regn,
+			cinfo->clkin,
+			cinfo->highfreq + 1,
+			cinfo->clkin4ddr);
+
+	DSSDBG("Data rate on 1 DSI lane %ld Mbps\n",
+			cinfo->clkin4ddr / 1000 / 1000 / 2);
+
+	DSSDBG("Clock lane freq %ld Hz\n", cinfo->clkin4ddr / 4);
+
+	DSSDBG("regm3 = %d, dsi1_pll_fclk = %lu\n",
+			cinfo->regm3, cinfo->dsi1_pll_fclk);
+	DSSDBG("regm4 = %d, dsi2_pll_fclk = %lu\n",
+			cinfo->regm4, cinfo->dsi2_pll_fclk);
+
+	REG_FLD_MOD(DSI_PLL_CONTROL, 0, 0, 0); /* DSI_PLL_AUTOMODE = manual */
+
+	l = dsi_read_reg(DSI_PLL_CONFIGURATION1);
+	l = FLD_MOD(l, 1, 0, 0);		/* DSI_PLL_STOPMODE */
+	l = FLD_MOD(l, cinfo->regn - 1, 7, 1);	/* DSI_PLL_REGN */
+	l = FLD_MOD(l, cinfo->regm, 18, 8);	/* DSI_PLL_REGM */
+	l = FLD_MOD(l, cinfo->regm3 > 0 ? cinfo->regm3 - 1 : 0,
+			22, 19);		/* DSI_CLOCK_DIV */
+	l = FLD_MOD(l, cinfo->regm4 > 0 ? cinfo->regm4 - 1 : 0,
+			26, 23);		/* DSIPROTO_CLOCK_DIV */
+	dsi_write_reg(DSI_PLL_CONFIGURATION1, l);
+
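+	/* DSI_PLL_FREQSEL: pick the code matching the Fint range */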
+	BUG_ON(cinfo->fint < FINT_MIN || cinfo->fint > FINT_MAX);
+	if (cinfo->fint < 1000000)
+		f = 0x3;
+	else if (cinfo->fint < 1250000)
+		f = 0x4;
+	else if (cinfo->fint < 1500000)
+		f = 0x5;
+	else if (cinfo->fint < 1750000)
+		f = 0x6;
+	else
+		f = 0x7;
+
+	l = dsi_read_reg(DSI_PLL_CONFIGURATION2);
+	l = FLD_MOD(l, f, 4, 1);		/* DSI_PLL_FREQSEL */
+	l = FLD_MOD(l, cinfo->use_dss2_fck ? 0 : 1,
+			11, 11);		/* DSI_PLL_CLKSEL */
+	l = FLD_MOD(l, cinfo->highfreq,
+			12, 12);		/* DSI_PLL_HIGHFREQ */
+	l = FLD_MOD(l, 1, 13, 13);		/* DSI_PLL_REFEN */
+	l = FLD_MOD(l, 0, 14, 14);		/* DSIPHY_CLKINEN */
+	l = FLD_MOD(l, 1, 20, 20);		/* DSI_HSDIVBYPASS */
+	dsi_write_reg(DSI_PLL_CONFIGURATION2, l);
+
+	REG_FLD_MOD(DSI_PLL_GO, 1, 0, 0);	/* DSI_PLL_GO */
+
+	if (wait_for_bit_change(DSI_PLL_GO, 0, 0) != 0) {
+		DSSERR("dsi pll go bit not going down.\n");
+		r = -EIO;
+		goto err;
+	}
+
+	if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1) {
+		DSSERR("cannot lock PLL\n");
+		r = -EIO;
+		goto err;
+	}
+
+	dsi.pll_locked = 1;
+
+	l = dsi_read_reg(DSI_PLL_CONFIGURATION2);
+	l = FLD_MOD(l, 0, 0, 0);	/* DSI_PLL_IDLE */
+	l = FLD_MOD(l, 0, 5, 5);	/* DSI_PLL_PLLLPMODE */
+	l = FLD_MOD(l, 0, 6, 6);	/* DSI_PLL_LOWCURRSTBY */
+	l = FLD_MOD(l, 0, 7, 7);	/* DSI_PLL_TIGHTPHASELOCK */
+	l = FLD_MOD(l, 0, 8, 8);	/* DSI_PLL_DRIFTGUARDEN */
+	l = FLD_MOD(l, 0, 10, 9);	/* DSI_PLL_LOCKSEL */
+	l = FLD_MOD(l, 1, 13, 13);	/* DSI_PLL_REFEN */
+	l = FLD_MOD(l, 1, 14, 14);	/* DSIPHY_CLKINEN */
+	l = FLD_MOD(l, 0, 15, 15);	/* DSI_BYPASSEN */
+	l = FLD_MOD(l, 1, 16, 16);	/* DSS_CLOCK_EN */
+	l = FLD_MOD(l, 0, 17, 17);	/* DSS_CLOCK_PWDN */
+	l = FLD_MOD(l, 1, 18, 18);	/* DSI_PROTO_CLOCK_EN */
+	l = FLD_MOD(l, 0, 19, 19);	/* DSI_PROTO_CLOCK_PWDN */
+	l = FLD_MOD(l, 0, 20, 20);	/* DSI_HSDIVBYPASS */
+	dsi_write_reg(DSI_PLL_CONFIGURATION2, l);
+
+	DSSDBG("PLL config done\n");
+err:
+	return r;
+}
+
+int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk,
+		bool enable_hsdiv)
+{
+	int r = 0;
+	enum dsi_pll_power_state pwstate;
+
+	DSSDBG("PLL init\n");
+
+	enable_clocks(1);
+	dsi_enable_pll_clock(1);
+
+	r = regulator_enable(dsi.vdds_dsi_reg);
+	if (r)
+		goto err0;
+
+	/* XXX PLL does not come out of reset without this... */
+	dispc_pck_free_enable(1);
+
+	if (wait_for_bit_change(DSI_PLL_STATUS, 0, 1) != 1) {
+		DSSERR("PLL not coming out of reset.\n");
+		r = -ENODEV;
+		goto err1;
+	}
+
+	/* XXX ... but if left on, we get problems when planes do not
+	 * fill the whole display. No idea about this */
+	dispc_pck_free_enable(0);
+
+	if (enable_hsclk && enable_hsdiv)
+		pwstate = DSI_PLL_POWER_ON_ALL;
+	else if (enable_hsclk)
+		pwstate = DSI_PLL_POWER_ON_HSCLK;
+	else if (enable_hsdiv)
+		pwstate = DSI_PLL_POWER_ON_DIV;
+	else
+		pwstate = DSI_PLL_POWER_OFF;
+
+	r = dsi_pll_power(pwstate);
+
+	if (r)
+		goto err1;
+
+	DSSDBG("PLL init done\n");
+
+	return 0;
+err1:
+	regulator_disable(dsi.vdds_dsi_reg);
+err0:
+	enable_clocks(0);
+	dsi_enable_pll_clock(0);
+	return r;
+}
+
+void dsi_pll_uninit(void)
+{
+	enable_clocks(0);
+	dsi_enable_pll_clock(0);
+
+	dsi.pll_locked = 0;
+	dsi_pll_power(DSI_PLL_POWER_OFF);
+	regulator_disable(dsi.vdds_dsi_reg);
+	DSSDBG("PLL uninit done\n");
+}
+
+void dsi_dump_clocks(struct seq_file *s)
+{
+	int clksel;
+	struct dsi_clock_info *cinfo = &dsi.current_cinfo;
+
+	enable_clocks(1);
+
+	clksel = REG_GET(DSI_PLL_CONFIGURATION2, 11, 11);
+
+	seq_printf(s,	"- DSI PLL -\n");
+
+	seq_printf(s,	"dsi pll source = %s\n",
+			clksel == 0 ?
+			"dss2_alwon_fclk" : "pclkfree");
+
+	seq_printf(s,	"Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn);
+
+	seq_printf(s,	"CLKIN4DDR\t%-16luregm %u\n",
+			cinfo->clkin4ddr, cinfo->regm);
+
+	seq_printf(s,	"dsi1_pll_fck\t%-16luregm3 %u\t(%s)\n",
+			cinfo->dsi1_pll_fclk,
+			cinfo->regm3,
+			dss_get_dispc_clk_source() == 0 ? "off" : "on");
+
+	seq_printf(s,	"dsi2_pll_fck\t%-16luregm4 %u\t(%s)\n",
+			cinfo->dsi2_pll_fclk,
+			cinfo->regm4,
+			dss_get_dsi_clk_source() == 0 ? "off" : "on");
+
+	seq_printf(s,	"- DSI -\n");
+
+	seq_printf(s,	"dsi fclk source = %s\n",
+			dss_get_dsi_clk_source() == 0 ?
+			"dss1_alwon_fclk" : "dsi2_pll_fclk");
+
+	seq_printf(s,	"DSI_FCLK\t%lu\n", dsi_fclk_rate());
+
+	seq_printf(s,	"DDR_CLK\t\t%lu\n",
+			cinfo->clkin4ddr / 4);
+
+	seq_printf(s,	"TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs());
+
+	seq_printf(s,	"LP_CLK\t\t%lu\n", cinfo->lp_clk);
+
+	seq_printf(s,	"VP_CLK\t\t%lu\n"
+			"VP_PCLK\t\t%lu\n",
+			dispc_lclk_rate(),
+			dispc_pclk_rate());
+
+	enable_clocks(0);
+}
+
+void dsi_dump_regs(struct seq_file *s)
+{
+#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(r))
+
+	dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+	DUMPREG(DSI_REVISION);
+	DUMPREG(DSI_SYSCONFIG);
+	DUMPREG(DSI_SYSSTATUS);
+	DUMPREG(DSI_IRQSTATUS);
+	DUMPREG(DSI_IRQENABLE);
+	DUMPREG(DSI_CTRL);
+	DUMPREG(DSI_COMPLEXIO_CFG1);
+	DUMPREG(DSI_COMPLEXIO_IRQ_STATUS);
+	DUMPREG(DSI_COMPLEXIO_IRQ_ENABLE);
+	DUMPREG(DSI_CLK_CTRL);
+	DUMPREG(DSI_TIMING1);
+	DUMPREG(DSI_TIMING2);
+	DUMPREG(DSI_VM_TIMING1);
+	DUMPREG(DSI_VM_TIMING2);
+	DUMPREG(DSI_VM_TIMING3);
+	DUMPREG(DSI_CLK_TIMING);
+	DUMPREG(DSI_TX_FIFO_VC_SIZE);
+	DUMPREG(DSI_RX_FIFO_VC_SIZE);
+	DUMPREG(DSI_COMPLEXIO_CFG2);
+	DUMPREG(DSI_RX_FIFO_VC_FULLNESS);
+	DUMPREG(DSI_VM_TIMING4);
+	DUMPREG(DSI_TX_FIFO_VC_EMPTINESS);
+	DUMPREG(DSI_VM_TIMING5);
+	DUMPREG(DSI_VM_TIMING6);
+	DUMPREG(DSI_VM_TIMING7);
+	DUMPREG(DSI_STOPCLK_TIMING);
+
+	DUMPREG(DSI_VC_CTRL(0));
+	DUMPREG(DSI_VC_TE(0));
+	DUMPREG(DSI_VC_LONG_PACKET_HEADER(0));
+	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(0));
+	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(0));
+	DUMPREG(DSI_VC_IRQSTATUS(0));
+	DUMPREG(DSI_VC_IRQENABLE(0));
+
+	DUMPREG(DSI_VC_CTRL(1));
+	DUMPREG(DSI_VC_TE(1));
+	DUMPREG(DSI_VC_LONG_PACKET_HEADER(1));
+	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(1));
+	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(1));
+	DUMPREG(DSI_VC_IRQSTATUS(1));
+	DUMPREG(DSI_VC_IRQENABLE(1));
+
+	DUMPREG(DSI_VC_CTRL(2));
+	DUMPREG(DSI_VC_TE(2));
+	DUMPREG(DSI_VC_LONG_PACKET_HEADER(2));
+	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(2));
+	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(2));
+	DUMPREG(DSI_VC_IRQSTATUS(2));
+	DUMPREG(DSI_VC_IRQENABLE(2));
+
+	DUMPREG(DSI_VC_CTRL(3));
+	DUMPREG(DSI_VC_TE(3));
+	DUMPREG(DSI_VC_LONG_PACKET_HEADER(3));
+	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(3));
+	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(3));
+	DUMPREG(DSI_VC_IRQSTATUS(3));
+	DUMPREG(DSI_VC_IRQENABLE(3));
+
+	DUMPREG(DSI_DSIPHY_CFG0);
+	DUMPREG(DSI_DSIPHY_CFG1);
+	DUMPREG(DSI_DSIPHY_CFG2);
+	DUMPREG(DSI_DSIPHY_CFG5);
+
+	DUMPREG(DSI_PLL_CONTROL);
+	DUMPREG(DSI_PLL_STATUS);
+	DUMPREG(DSI_PLL_GO);
+	DUMPREG(DSI_PLL_CONFIGURATION1);
+	DUMPREG(DSI_PLL_CONFIGURATION2);
+
+	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+#undef DUMPREG
+}
+
+enum dsi_complexio_power_state {
+	DSI_COMPLEXIO_POWER_OFF		= 0x0,
+	DSI_COMPLEXIO_POWER_ON		= 0x1,
+	DSI_COMPLEXIO_POWER_ULPS	= 0x2,
+};
+
+static int dsi_complexio_power(enum dsi_complexio_power_state state)
+{
+	int t = 0;
+
+	/* PWR_CMD */
+	REG_FLD_MOD(DSI_COMPLEXIO_CFG1, state, 28, 27);
+
+	/* PWR_STATUS */
+	while (FLD_GET(dsi_read_reg(DSI_COMPLEXIO_CFG1), 26, 25) != state) {
+		udelay(1);
+		if (t++ > 1000) {
+			DSSERR("failed to set complexio power state to "
+					"%d\n", state);
+			return -ENODEV;
+		}
+	}
+
+	return 0;
+}
+
+static void dsi_complexio_config(struct omap_dss_device *dssdev)
+{
+	u32 r;
+
+	int clk_lane   = dssdev->phy.dsi.clk_lane;
+	int data1_lane = dssdev->phy.dsi.data1_lane;
+	int data2_lane = dssdev->phy.dsi.data2_lane;
+	int clk_pol    = dssdev->phy.dsi.clk_pol;
+	int data1_pol  = dssdev->phy.dsi.data1_pol;
+	int data2_pol  = dssdev->phy.dsi.data2_pol;
+
+	r = dsi_read_reg(DSI_COMPLEXIO_CFG1);
+	r = FLD_MOD(r, clk_lane, 2, 0);
+	r = FLD_MOD(r, clk_pol, 3, 3);
+	r = FLD_MOD(r, data1_lane, 6, 4);
+	r = FLD_MOD(r, data1_pol, 7, 7);
+	r = FLD_MOD(r, data2_lane, 10, 8);
+	r = FLD_MOD(r, data2_pol, 11, 11);
+	dsi_write_reg(DSI_COMPLEXIO_CFG1, r);
+
+	/* The configuration of the DSI complex I/O (number of data lanes,
+	   position, differential order) should not be changed while
+	   DSS.DSI_CLK_CTRL[20] LP_CLK_ENABLE bit is set to 1. In order for
+	   the hardware to take into account a new configuration of the complex
+	   I/O (done in DSS.DSI_COMPLEXIO_CFG1 register), it is recommended to
+	   follow this sequence: First set the DSS.DSI_CTRL[0] IF_EN bit to 1,
+	   then reset the DSS.DSI_CTRL[0] IF_EN to 0, then set
+	   DSS.DSI_CLK_CTRL[20] LP_CLK_ENABLE to 1 and finally set again the
+	   DSS.DSI_CTRL[0] IF_EN bit to 1. If the sequence is not followed, the
+	   DSI complex I/O configuration is unknown. */
+
+	/*
+	REG_FLD_MOD(DSI_CTRL, 1, 0, 0);
+	REG_FLD_MOD(DSI_CTRL, 0, 0, 0);
+	REG_FLD_MOD(DSI_CLK_CTRL, 1, 20, 20);
+	REG_FLD_MOD(DSI_CTRL, 1, 0, 0);
+	*/
+}
+
+static inline unsigned ns2ddr(unsigned ns)
+{
+	/* convert time in ns to ddr ticks, rounding up */
+	unsigned long ddr_clk = dsi.current_cinfo.clkin4ddr / 4;
+	return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000;
+}
+
+static inline unsigned ddr2ns(unsigned ddr)
+{
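+	/* convert ddr ticks to time in ns (DDR clock is CLKIN4DDR / 4) */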
+	unsigned long ddr_clk = dsi.current_cinfo.clkin4ddr / 4;
+	return ddr * 1000 * 1000 / (ddr_clk / 1000);
+}
+
+static void dsi_complexio_timings(void)
+{
+	u32 r;
+	u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
+	u32 tlpx_half, tclk_trail, tclk_zero;
+	u32 tclk_prepare;
+
+	/* calculate timings */
+
+	/* 1 * DDR_CLK = 2 * UI */
+
+	/* min 40ns + 4*UI	max 85ns + 6*UI */
+	ths_prepare = ns2ddr(70) + 2;
+
+	/* min 145ns + 10*UI */
+	ths_prepare_ths_zero = ns2ddr(175) + 2;
+
+	/* min max(8*UI, 60ns+4*UI) */
+	ths_trail = ns2ddr(60) + 5;
+
+	/* min 100ns */
+	ths_exit = ns2ddr(145);
+
+	/* tlpx min 50ns */
+	tlpx_half = ns2ddr(25);
+
+	/* min 60ns */
+	tclk_trail = ns2ddr(60) + 2;
+
+	/* min 38ns, max 95ns */
+	tclk_prepare = ns2ddr(65);
+
+	/* min tclk-prepare + tclk-zero = 300ns */
+	tclk_zero = ns2ddr(260);
+
+	DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n",
+		ths_prepare, ddr2ns(ths_prepare),
+		ths_prepare_ths_zero, ddr2ns(ths_prepare_ths_zero));
+	DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n",
+			ths_trail, ddr2ns(ths_trail),
+			ths_exit, ddr2ns(ths_exit));
+
+	DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), "
+			"tclk_zero %u (%uns)\n",
+			tlpx_half, ddr2ns(tlpx_half),
+			tclk_trail, ddr2ns(tclk_trail),
+			tclk_zero, ddr2ns(tclk_zero));
+	DSSDBG("tclk_prepare %u (%uns)\n",
+			tclk_prepare, ddr2ns(tclk_prepare));
+
+	/* program timings */
+
+	r = dsi_read_reg(DSI_DSIPHY_CFG0);
+	r = FLD_MOD(r, ths_prepare, 31, 24);
+	r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16);
+	r = FLD_MOD(r, ths_trail, 15, 8);
+	r = FLD_MOD(r, ths_exit, 7, 0);
+	dsi_write_reg(DSI_DSIPHY_CFG0, r);
+
+	r = dsi_read_reg(DSI_DSIPHY_CFG1);
+	r = FLD_MOD(r, tlpx_half, 22, 16);
+	r = FLD_MOD(r, tclk_trail, 15, 8);
+	r = FLD_MOD(r, tclk_zero, 7, 0);
+	dsi_write_reg(DSI_DSIPHY_CFG1, r);
+
+	r = dsi_read_reg(DSI_DSIPHY_CFG2);
+	r = FLD_MOD(r, tclk_prepare, 7, 0);
+	dsi_write_reg(DSI_DSIPHY_CFG2, r);
+}
+
+static int dsi_complexio_init(struct omap_dss_device *dssdev)
+{
+	int r = 0;
+
+	DSSDBG("dsi_complexio_init\n");
+
+	/* CIO_CLK_ICG, enable L3 clk to CIO */
+	REG_FLD_MOD(DSI_CLK_CTRL, 1, 14, 14);
+
+	/* A dummy read using the SCP interface to any DSIPHY register is
+	 * required after DSIPHY reset to complete the reset of the DSI complex
+	 * I/O. */
+	dsi_read_reg(DSI_DSIPHY_CFG5);
+
+	if (wait_for_bit_change(DSI_DSIPHY_CFG5, 30, 1) != 1) {
+		DSSERR("ComplexIO PHY not coming out of reset.\n");
+		r = -ENODEV;
+		goto err;
+	}
+
+	dsi_complexio_config(dssdev);
+
+	r = dsi_complexio_power(DSI_COMPLEXIO_POWER_ON);
+
+	if (r)
+		goto err;
+
+	if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 29, 1) != 1) {
+		DSSERR("ComplexIO not coming out of reset.\n");
+		r = -ENODEV;
+		goto err;
+	}
+
+	if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 21, 1) != 1) {
+		DSSERR("ComplexIO LDO power down.\n");
+		r = -ENODEV;
+		goto err;
+	}
+
+	dsi_complexio_timings();
+
+	/*
+	   The configuration of the DSI complex I/O (number of data lanes,
+	   position, differential order) should not be changed while
+	   DSS.DSI_CLK_CTRL[20] LP_CLK_ENABLE bit is set to 1. For the
+	   hardware to recognize a new configuration of the complex I/O (done
+	   in DSS.DSI_COMPLEXIO_CFG1 register), it is recommended to follow
+	   this sequence: First set the DSS.DSI_CTRL[0] IF_EN bit to 1, next
+	   reset the DSS.DSI_CTRL[0] IF_EN to 0, then set DSS.DSI_CLK_CTRL[20]
+	   LP_CLK_ENABLE to 1, and finally, set again the DSS.DSI_CTRL[0] IF_EN
+	   bit to 1. If the sequence is not followed, the DSI complex I/O
+	   configuration is undetermined.
+	   */
+	dsi_if_enable(1);
+	dsi_if_enable(0);
+	REG_FLD_MOD(DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */
+	dsi_if_enable(1);
+	dsi_if_enable(0);
+
+	DSSDBG("CIO init done\n");
+err:
+	return r;
+}
+
+static void dsi_complexio_uninit(void)
+{
+	dsi_complexio_power(DSI_COMPLEXIO_POWER_OFF);
+}
+
+static int _dsi_wait_reset(void)
+{
+	int i = 0;
+
+	while (REG_GET(DSI_SYSSTATUS, 0, 0) == 0) {
+		if (i++ > 5) {
+			DSSERR("soft reset failed\n");
+			return -ENODEV;
+		}
+		udelay(1);
+	}
+
+	return 0;
+}
+
+static int _dsi_reset(void)
+{
+	/* Soft reset */
+	REG_FLD_MOD(DSI_SYSCONFIG, 1, 1, 1);
+	return _dsi_wait_reset();
+}
+
+static void dsi_reset_tx_fifo(int channel)
+{
+	u32 mask;
+	u32 l;
+
+	/* set fifosize of the channel to 0, then restore the old size */
+	l = dsi_read_reg(DSI_TX_FIFO_VC_SIZE);
+
+	mask = FLD_MASK((8 * channel) + 7, (8 * channel) + 4);
+	dsi_write_reg(DSI_TX_FIFO_VC_SIZE, l & ~mask);
+
+	dsi_write_reg(DSI_TX_FIFO_VC_SIZE, l);
+}
+
+static void dsi_config_tx_fifo(enum fifo_size size1, enum fifo_size size2,
+		enum fifo_size size3, enum fifo_size size4)
+{
+	u32 r = 0;
+	int add = 0;
+	int i;
+
+	dsi.vc[0].fifo_size = size1;
+	dsi.vc[1].fifo_size = size2;
+	dsi.vc[2].fifo_size = size3;
+	dsi.vc[3].fifo_size = size4;
+
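+	/* the TX FIFO is four units deep; 'add' is each VC's starting unit */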
+	for (i = 0; i < 4; i++) {
+		u8 v;
+		int size = dsi.vc[i].fifo_size;
+
+		if (add + size > 4) {
+			DSSERR("Illegal FIFO configuration\n");
+			BUG();
+		}
+
+		v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
+		r |= v << (8 * i);
+		/*DSSDBG("TX FIFO vc %d: size %d, add %d\n", i, size, add); */
+		add += size;
+	}
+
+	dsi_write_reg(DSI_TX_FIFO_VC_SIZE, r);
+}
+
+static void dsi_config_rx_fifo(enum fifo_size size1, enum fifo_size size2,
+		enum fifo_size size3, enum fifo_size size4)
+{
+	u32 r = 0;
+	int add = 0;
+	int i;
+
+	dsi.vc[0].fifo_size = size1;
+	dsi.vc[1].fifo_size = size2;
+	dsi.vc[2].fifo_size = size3;
+	dsi.vc[3].fifo_size = size4;
+
+	for (i = 0; i < 4; i++) {
+		u8 v;
+		int size = dsi.vc[i].fifo_size;
+
+		if (add + size > 4) {
+			DSSERR("Illegal FIFO configuration\n");
+			BUG();
+		}
+
+		v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
+		r |= v << (8 * i);
+		/*DSSDBG("RX FIFO vc %d: size %d, add %d\n", i, size, add); */
+		add += size;
+	}
+
+	dsi_write_reg(DSI_RX_FIFO_VC_SIZE, r);
+}
+
+static int dsi_force_tx_stop_mode_io(void)
+{
+	u32 r;
+
+	r = dsi_read_reg(DSI_TIMING1);
+	r = FLD_MOD(r, 1, 15, 15);	/* FORCE_TX_STOP_MODE_IO */
+	dsi_write_reg(DSI_TIMING1, r);
+
+	if (wait_for_bit_change(DSI_TIMING1, 15, 0) != 0) {
+		DSSERR("TX_STOP bit not going down\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void dsi_vc_print_status(int channel)
+{
+	u32 r;
+
+	r = dsi_read_reg(DSI_VC_CTRL(channel));
+	DSSDBG("vc %d: TX_FIFO_NOT_EMPTY %d, BTA_EN %d, VC_BUSY %d, "
+			"TX_FIFO_FULL %d, RX_FIFO_NOT_EMPTY %d, ",
+			channel,
+			FLD_GET(r, 5, 5),
+			FLD_GET(r, 6, 6),
+			FLD_GET(r, 15, 15),
+			FLD_GET(r, 16, 16),
+			FLD_GET(r, 20, 20));
+
+	r = dsi_read_reg(DSI_TX_FIFO_VC_EMPTINESS);
+	DSSDBG("EMPTINESS %d\n", (r >> (8 * channel)) & 0xff);
+}
+
+static int dsi_vc_enable(int channel, bool enable)
+{
+	if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO)
+		DSSDBG("dsi_vc_enable channel %d, enable %d\n",
+				channel, enable);
+
+	enable = enable ? 1 : 0;
+
+	REG_FLD_MOD(DSI_VC_CTRL(channel), enable, 0, 0);
+
+	if (wait_for_bit_change(DSI_VC_CTRL(channel), 0, enable) != enable) {
+		DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void dsi_vc_initial_config(int channel)
+{
+	u32 r;
+
+	DSSDBGF("%d", channel);
+
+	r = dsi_read_reg(DSI_VC_CTRL(channel));
+
+	if (FLD_GET(r, 15, 15)) /* VC_BUSY */
+		DSSERR("VC(%d) busy when trying to configure it!\n",
+				channel);
+
+	r = FLD_MOD(r, 0, 1, 1); /* SOURCE, 0 = L4 */
+	r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN  */
+	r = FLD_MOD(r, 0, 3, 3); /* BTA_LONG_EN */
+	r = FLD_MOD(r, 0, 4, 4); /* MODE, 0 = command */
+	r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */
+	r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */
+	r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */
+
+	r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
+	r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
+
+	dsi_write_reg(DSI_VC_CTRL(channel), r);
+
+	dsi.vc[channel].mode = DSI_VC_MODE_L4;
+}
+
+static void dsi_vc_config_l4(int channel)
+{
+	if (dsi.vc[channel].mode == DSI_VC_MODE_L4)
+		return;
+
+	DSSDBGF("%d", channel);
+
+	dsi_vc_enable(channel, 0);
+
+	if (REG_GET(DSI_VC_CTRL(channel), 15, 15)) /* VC_BUSY */
+		DSSERR("vc(%d) busy when trying to config for L4\n", channel);
+
+	REG_FLD_MOD(DSI_VC_CTRL(channel), 0, 1, 1); /* SOURCE, 0 = L4 */
+
+	dsi_vc_enable(channel, 1);
+
+	dsi.vc[channel].mode = DSI_VC_MODE_L4;
+}
+
+static void dsi_vc_config_vp(int channel)
+{
+	if (dsi.vc[channel].mode == DSI_VC_MODE_VP)
+		return;
+
+	DSSDBGF("%d", channel);
+
+	dsi_vc_enable(channel, 0);
+
+	if (REG_GET(DSI_VC_CTRL(channel), 15, 15)) /* VC_BUSY */
+		DSSERR("vc(%d) busy when trying to config for VP\n", channel);
+
+	REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 1, 1); /* SOURCE, 1 = video port */
+
+	dsi_vc_enable(channel, 1);
+
+	dsi.vc[channel].mode = DSI_VC_MODE_VP;
+}
+
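+/*
+ * Enable or disable high-speed mode for a virtual channel. The VC and
+ * the whole interface are disabled around the change and re-enabled
+ * afterwards.
+ */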
+static void dsi_vc_enable_hs(int channel, bool enable)
+{
+	DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);
+
+	dsi_vc_enable(channel, 0);
+	dsi_if_enable(0);
+
+	REG_FLD_MOD(DSI_VC_CTRL(channel), enable, 9, 9);
+
+	dsi_vc_enable(channel, 1);
+	dsi_if_enable(1);
+
+	dsi_force_tx_stop_mode_io();
+}
+
+static void dsi_vc_flush_long_data(int channel)
+{
+	while (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
+		u32 val;
+		val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
+		DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
+				(val >> 0) & 0xff,
+				(val >> 8) & 0xff,
+				(val >> 16) & 0xff,
+				(val >> 24) & 0xff);
+	}
+}
+
+static void dsi_show_rx_ack_with_err(u16 err)
+{
+	DSSERR("\tACK with ERROR (%#x):\n", err);
+	if (err & (1 << 0))
+		DSSERR("\t\tSoT Error\n");
+	if (err & (1 << 1))
+		DSSERR("\t\tSoT Sync Error\n");
+	if (err & (1 << 2))
+		DSSERR("\t\tEoT Sync Error\n");
+	if (err & (1 << 3))
+		DSSERR("\t\tEscape Mode Entry Command Error\n");
+	if (err & (1 << 4))
+		DSSERR("\t\tLP Transmit Sync Error\n");
+	if (err & (1 << 5))
+		DSSERR("\t\tHS Receive Timeout Error\n");
+	if (err & (1 << 6))
+		DSSERR("\t\tFalse Control Error\n");
+	if (err & (1 << 7))
+		DSSERR("\t\t(reserved7)\n");
+	if (err & (1 << 8))
+		DSSERR("\t\tECC Error, single-bit (corrected)\n");
+	if (err & (1 << 9))
+		DSSERR("\t\tECC Error, multi-bit (not corrected)\n");
+	if (err & (1 << 10))
+		DSSERR("\t\tChecksum Error\n");
+	if (err & (1 << 11))
+		DSSERR("\t\tData type not recognized\n");
+	if (err & (1 << 12))
+		DSSERR("\t\tInvalid VC ID\n");
+	if (err & (1 << 13))
+		DSSERR("\t\tInvalid Transmission Length\n");
+	if (err & (1 << 14))
+		DSSERR("\t\t(reserved14)\n");
+	if (err & (1 << 15))
+		DSSERR("\t\tDSI Protocol Violation\n");
+}
+
+static u16 dsi_vc_flush_receive_data(int channel)
+{
+	/* RX_FIFO_NOT_EMPTY */
+	while (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
+		u32 val;
+		u8 dt;
+		val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
+		DSSDBG("\trawval %#08x\n", val);
+		dt = FLD_GET(val, 5, 0);
+		if (dt == DSI_DT_RX_ACK_WITH_ERR) {
+			u16 err = FLD_GET(val, 23, 8);
+			dsi_show_rx_ack_with_err(err);
+		} else if (dt == DSI_DT_RX_SHORT_READ_1) {
+			DSSDBG("\tDCS short response, 1 byte: %#x\n",
+					FLD_GET(val, 23, 8));
+		} else if (dt == DSI_DT_RX_SHORT_READ_2) {
+			DSSDBG("\tDCS short response, 2 byte: %#x\n",
+					FLD_GET(val, 23, 8));
+		} else if (dt == DSI_DT_RX_DCS_LONG_READ) {
+			DSSDBG("\tDCS long response, len %d\n",
+					FLD_GET(val, 23, 8));
+			dsi_vc_flush_long_data(channel);
+		} else {
+			DSSERR("\tunknown datatype 0x%02x\n", dt);
+		}
+	}
+	return 0;
+}
+
+static int dsi_vc_send_bta(int channel)
+{
+	if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO &&
+			(dsi.debug_write || dsi.debug_read))
+		DSSDBG("dsi_vc_send_bta %d\n", channel);
+
+	WARN_ON(!mutex_is_locked(&dsi.bus_lock));
+
+	if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {	/* RX_FIFO_NOT_EMPTY */
+		DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
+		dsi_vc_flush_receive_data(channel);
+	}
+
+	REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
+
+	return 0;
+}
+
+int dsi_vc_send_bta_sync(int channel)
+{
+	int r = 0;
+	u32 err;
+
+	INIT_COMPLETION(dsi.bta_completion);
+
+	dsi_vc_enable_bta_irq(channel);
+
+	r = dsi_vc_send_bta(channel);
+	if (r)
+		goto err;
+
+	if (wait_for_completion_timeout(&dsi.bta_completion,
+				msecs_to_jiffies(500)) == 0) {
+		DSSERR("Failed to receive BTA\n");
+		r = -EIO;
+		goto err;
+	}
+
+	err = dsi_get_errors();
+	if (err) {
+		DSSERR("Error while sending BTA: %x\n", err);
+		r = -EIO;
+		goto err;
+	}
+err:
+	dsi_vc_disable_bta_irq(channel);
+
+	return r;
+}
+EXPORT_SYMBOL(dsi_vc_send_bta_sync);
+
+static inline void dsi_vc_write_long_header(int channel, u8 data_type,
+		u16 len, u8 ecc)
+{
+	u32 val;
+	u8 data_id;
+
+	WARN_ON(!mutex_is_locked(&dsi.bus_lock));
+
+	/*data_id = data_type | channel << 6; */
+	data_id = data_type | dsi.vc[channel].dest_per << 6;
+
+	val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
+		FLD_VAL(ecc, 31, 24);
+
+	dsi_write_reg(DSI_VC_LONG_PACKET_HEADER(channel), val);
+}
+
+static inline void dsi_vc_write_long_payload(int channel,
+		u8 b1, u8 b2, u8 b3, u8 b4)
+{
+	u32 val;
+
+	val = b4 << 24 | b3 << 16 | b2 << 8  | b1 << 0;
+
+/*	DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
+			b1, b2, b3, b4, val); */
+
+	dsi_write_reg(DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
+}
+
+static int dsi_vc_send_long(int channel, u8 data_type, u8 *data, u16 len,
+		u8 ecc)
+{
+	/*u32 val; */
+	int i;
+	u8 *p;
+	int r = 0;
+	u8 b1, b2, b3, b4;
+
+	if (dsi.debug_write)
+		DSSDBG("dsi_vc_send_long, %d bytes\n", len);
+
+	/* len + header */
+	if (dsi.vc[channel].fifo_size * 32 * 4 < len + 4) {
+		DSSERR("unable to send long packet: packet too long.\n");
+		return -EINVAL;
+	}
+
+	dsi_vc_config_l4(channel);
+
+	dsi_vc_write_long_header(channel, data_type, len, ecc);
+
+	/*dsi_vc_print_status(0); */
+
+	p = data;
+	for (i = 0; i < len >> 2; i++) {
+		if (dsi.debug_write)
+			DSSDBG("\tsending full packet %d\n", i);
+		/*dsi_vc_print_status(0); */
+
+		b1 = *p++;
+		b2 = *p++;
+		b3 = *p++;
+		b4 = *p++;
+
+		dsi_vc_write_long_payload(channel, b1, b2, b3, b4);
+	}
+
+	i = len % 4;
+	if (i) {
+		b1 = 0; b2 = 0; b3 = 0;
+
+		if (dsi.debug_write)
+			DSSDBG("\tsending remainder bytes %d\n", i);
+
+		switch (i) {
+		case 3:
+			b1 = *p++;
+			b2 = *p++;
+			b3 = *p++;
+			break;
+		case 2:
+			b1 = *p++;
+			b2 = *p++;
+			break;
+		case 1:
+			b1 = *p++;
+			break;
+		}
+
+		dsi_vc_write_long_payload(channel, b1, b2, b3, 0);
+	}
+
+	return r;
+}
+
+static int dsi_vc_send_short(int channel, u8 data_type, u16 data, u8 ecc)
+{
+	u32 r;
+	u8 data_id;
+
+	WARN_ON(!mutex_is_locked(&dsi.bus_lock));
+
+	if (dsi.debug_write)
+		DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
+				channel,
+				data_type, data & 0xff, (data >> 8) & 0xff);
+
+	dsi_vc_config_l4(channel);
+
+	if (FLD_GET(dsi_read_reg(DSI_VC_CTRL(channel)), 16, 16)) {
+		DSSERR("ERROR FIFO FULL, aborting transfer\n");
+		return -EINVAL;
+	}
+
+	data_id = data_type | channel << 6;
+
+	r = (data_id << 0) | (data << 8) | (ecc << 24);
+
+	dsi_write_reg(DSI_VC_SHORT_PACKET_HEADER(channel), r);
+
+	return 0;
+}
+
+int dsi_vc_send_null(int channel)
+{
+	u8 nullpkg[] = {0, 0, 0, 0};
+	return dsi_vc_send_long(channel, DSI_DT_NULL_PACKET, nullpkg, 4, 0);
+}
+EXPORT_SYMBOL(dsi_vc_send_null);
+
+int dsi_vc_dcs_write_nosync(int channel, u8 *data, int len)
+{
+	int r;
+
+	BUG_ON(len == 0);
+
+	if (len == 1) {
+		r = dsi_vc_send_short(channel, DSI_DT_DCS_SHORT_WRITE_0,
+				data[0], 0);
+	} else if (len == 2) {
+		r = dsi_vc_send_short(channel, DSI_DT_DCS_SHORT_WRITE_1,
+				data[0] | (data[1] << 8), 0);
+	} else {
+		/* 0x39 = DCS Long Write */
+		r = dsi_vc_send_long(channel, DSI_DT_DCS_LONG_WRITE,
+				data, len, 0);
+	}
+
+	return r;
+}
+EXPORT_SYMBOL(dsi_vc_dcs_write_nosync);
+
+int dsi_vc_dcs_write(int channel, u8 *data, int len)
+{
+	int r;
+
+	r = dsi_vc_dcs_write_nosync(channel, data, len);
+	if (r)
+		return r;
+
+	r = dsi_vc_send_bta_sync(channel);
+
+	return r;
+}
+EXPORT_SYMBOL(dsi_vc_dcs_write);
+
+int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
+{
+	u32 val;
+	u8 dt;
+	int r;
+
+	if (dsi.debug_read)
+		DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %u)\n", channel, dcs_cmd);
+
+	r = dsi_vc_send_short(channel, DSI_DT_DCS_READ, dcs_cmd, 0);
+	if (r)
+		return r;
+
+	r = dsi_vc_send_bta_sync(channel);
+	if (r)
+		return r;
+
+	/* RX_FIFO_NOT_EMPTY */
+	if (REG_GET(DSI_VC_CTRL(channel), 20, 20) == 0) {
+		DSSERR("RX fifo empty when trying to read.\n");
+		return -EIO;
+	}
+
+	val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
+	if (dsi.debug_read)
+		DSSDBG("\theader: %08x\n", val);
+	dt = FLD_GET(val, 5, 0);
+	if (dt == DSI_DT_RX_ACK_WITH_ERR) {
+		u16 err = FLD_GET(val, 23, 8);
+		dsi_show_rx_ack_with_err(err);
+		return -EIO;
+
+	} else if (dt == DSI_DT_RX_SHORT_READ_1) {
+		u8 data = FLD_GET(val, 15, 8);
+		if (dsi.debug_read)
+			DSSDBG("\tDCS short response, 1 byte: %02x\n", data);
+
+		if (buflen < 1)
+			return -EIO;
+
+		buf[0] = data;
+
+		return 1;
+	} else if (dt == DSI_DT_RX_SHORT_READ_2) {
+		u16 data = FLD_GET(val, 23, 8);
+		if (dsi.debug_read)
+			DSSDBG("\tDCS short response, 2 byte: %04x\n", data);
+
+		if (buflen < 2)
+			return -EIO;
+
+		buf[0] = data & 0xff;
+		buf[1] = (data >> 8) & 0xff;
+
+		return 2;
+	} else if (dt == DSI_DT_RX_DCS_LONG_READ) {
+		int w;
+		int len = FLD_GET(val, 23, 8);
+		if (dsi.debug_read)
+			DSSDBG("\tDCS long response, len %d\n", len);
+
+		if (len > buflen)
+			return -EIO;
+
+		/* two byte checksum ends the packet, not included in len */
+		for (w = 0; w < len + 2;) {
+			int b;
+			val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
+			if (dsi.debug_read)
+				DSSDBG("\t\t%02x %02x %02x %02x\n",
+						(val >> 0) & 0xff,
+						(val >> 8) & 0xff,
+						(val >> 16) & 0xff,
+						(val >> 24) & 0xff);
+
+			for (b = 0; b < 4; ++b) {
+				if (w < len)
+					buf[w] = (val >> (b * 8)) & 0xff;
+				/* we discard the 2 byte checksum */
+				++w;
+			}
+		}
+
+		return len;
+
+	} else {
+		DSSERR("\tunknown datatype 0x%02x\n", dt);
+		return -EIO;
+	}
+}
+EXPORT_SYMBOL(dsi_vc_dcs_read);
+
+int dsi_vc_set_max_rx_packet_size(int channel, u16 len)
+{
+	int r;
+	r = dsi_vc_send_short(channel, DSI_DT_SET_MAX_RET_PKG_SIZE,
+			len, 0);
+
+	if (r)
+		return r;
+
+	r = dsi_vc_send_bta_sync(channel);
+
+	return r;
+}
+EXPORT_SYMBOL(dsi_vc_set_max_rx_packet_size);
+
+static void dsi_set_lp_rx_timeout(unsigned long ns)
+{
+	u32 r;
+	unsigned x4, x16;
+	unsigned long fck;
+	unsigned long ticks;
+
+	/* ticks in DSI_FCK */
+
+	fck = dsi_fclk_rate();
+	ticks = (fck / 1000 / 1000) * ns / 1000;
+	x4 = 0;
+	x16 = 0;
+
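+	/* scale with the x4/x16 prescalers until the 13-bit counter fits */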
+	if (ticks > 0x1fff) {
+		ticks = (fck / 1000 / 1000) * ns / 1000 / 4;
+		x4 = 1;
+		x16 = 0;
+	}
+
+	if (ticks > 0x1fff) {
+		ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
+		x4 = 0;
+		x16 = 1;
+	}
+
+	if (ticks > 0x1fff) {
+		ticks = (fck / 1000 / 1000) * ns / 1000 / (4 * 16);
+		x4 = 1;
+		x16 = 1;
+	}
+
+	if (ticks > 0x1fff) {
+		DSSWARN("LP_TX_TO over limit, setting it to max\n");
+		ticks = 0x1fff;
+		x4 = 1;
+		x16 = 1;
+	}
+
+	r = dsi_read_reg(DSI_TIMING2);
+	r = FLD_MOD(r, 1, 15, 15);	/* LP_RX_TO */
+	r = FLD_MOD(r, x16, 14, 14);	/* LP_RX_TO_X16 */
+	r = FLD_MOD(r, x4, 13, 13);	/* LP_RX_TO_X4 */
+	r = FLD_MOD(r, ticks, 12, 0);	/* LP_RX_COUNTER */
+	dsi_write_reg(DSI_TIMING2, r);
+
+	DSSDBG("LP_RX_TO %lu ns (%#lx ticks%s%s)\n",
+			(ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
+			(fck / 1000 / 1000),
+			ticks, x4 ? " x4" : "", x16 ? " x16" : "");
+}
+
+static void dsi_set_ta_timeout(unsigned long ns)
+{
+	u32 r;
+	unsigned x8, x16;
+	unsigned long fck;
+	unsigned long ticks;
+
+	/* ticks in DSI_FCK */
+	fck = dsi_fclk_rate();
+	ticks = (fck / 1000 / 1000) * ns / 1000;
+	x8 = 0;
+	x16 = 0;
+
+	if (ticks > 0x1fff) {
+		ticks = (fck / 1000 / 1000) * ns / 1000 / 8;
+		x8 = 1;
+		x16 = 0;
+	}
+
+	if (ticks > 0x1fff) {
+		ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
+		x8 = 0;
+		x16 = 1;
+	}
+
+	if (ticks > 0x1fff) {
+		ticks = (fck / 1000 / 1000) * ns / 1000 / (8 * 16);
+		x8 = 1;
+		x16 = 1;
+	}
+
+	if (ticks > 0x1fff) {
+		DSSWARN("TA_TO over limit, setting it to max\n");
+		ticks = 0x1fff;
+		x8 = 1;
+		x16 = 1;
+	}
+
+	r = dsi_read_reg(DSI_TIMING1);
+	r = FLD_MOD(r, 1, 31, 31);	/* TA_TO */
+	r = FLD_MOD(r, x16, 30, 30);	/* TA_TO_X16 */
+	r = FLD_MOD(r, x8, 29, 29);	/* TA_TO_X8 */
+	r = FLD_MOD(r, ticks, 28, 16);	/* TA_TO_COUNTER */
+	dsi_write_reg(DSI_TIMING1, r);
+
+	DSSDBG("TA_TO %lu ns (%#lx ticks%s%s)\n",
+			(ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1) * 1000) /
+			(fck / 1000 / 1000),
+			ticks, x8 ? " x8" : "", x16 ? " x16" : "");
+}
+
+static void dsi_set_stop_state_counter(unsigned long ns)
+{
+	u32 r;
+	unsigned x4, x16;
+	unsigned long fck;
+	unsigned long ticks;
+
+	/* ticks in DSI_FCK */
+
+	fck = dsi_fclk_rate();
+	ticks = (fck / 1000 / 1000) * ns / 1000;
+	x4 = 0;
+	x16 = 0;
+
+	if (ticks > 0x1fff) {
+		ticks = (fck / 1000 / 1000) * ns / 1000 / 4;
+		x4 = 1;
+		x16 = 0;
+	}
+
+	if (ticks > 0x1fff) {
+		ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
+		x4 = 0;
+		x16 = 1;
+	}
+
+	if (ticks > 0x1fff) {
+		ticks = (fck / 1000 / 1000) * ns / 1000 / (4 * 16);
+		x4 = 1;
+		x16 = 1;
+	}
+
+	if (ticks > 0x1fff) {
+		DSSWARN("STOP_STATE_COUNTER_IO over limit, "
+				"setting it to max\n");
+		ticks = 0x1fff;
+		x4 = 1;
+		x16 = 1;
+	}
+
+	r = dsi_read_reg(DSI_TIMING1);
+	r = FLD_MOD(r, 1, 15, 15);	/* FORCE_TX_STOP_MODE_IO */
+	r = FLD_MOD(r, x16, 14, 14);	/* STOP_STATE_X16_IO */
+	r = FLD_MOD(r, x4, 13, 13);	/* STOP_STATE_X4_IO */
+	r = FLD_MOD(r, ticks, 12, 0);	/* STOP_STATE_COUNTER_IO */
+	dsi_write_reg(DSI_TIMING1, r);
+
+	DSSDBG("STOP_STATE_COUNTER %lu ns (%#lx ticks%s%s)\n",
+			(ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
+			(fck / 1000 / 1000),
+			ticks, x4 ? " x4" : "", x16 ? " x16" : "");
+}
+
+static void dsi_set_hs_tx_timeout(unsigned long ns)
+{
+	u32 r;
+	unsigned x4, x16;
+	unsigned long fck;
+	unsigned long ticks;
+
+	/* ticks in TxByteClkHS */
+
+	fck = dsi_get_txbyteclkhs();
+	ticks = (fck / 1000 / 1000) * ns / 1000;
+	x4 = 0;
+	x16 = 0;
+
+	if (ticks > 0x1fff) {
+		ticks = (fck / 1000 / 1000) * ns / 1000 / 4;
+		x4 = 1;
+		x16 = 0;
+	}
+
+	if (ticks > 0x1fff) {
+		ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
+		x4 = 0;
+		x16 = 1;
+	}
+
+	if (ticks > 0x1fff) {
+		ticks = (fck / 1000 / 1000) * ns / 1000 / (4 * 16);
+		x4 = 1;
+		x16 = 1;
+	}
+
+	if (ticks > 0x1fff) {
+		DSSWARN("HS_TX_TO over limit, setting it to max\n");
+		ticks = 0x1fff;
+		x4 = 1;
+		x16 = 1;
+	}
+
+	r = dsi_read_reg(DSI_TIMING2);
+	r = FLD_MOD(r, 1, 31, 31);	/* HS_TX_TO */
+	r = FLD_MOD(r, x16, 30, 30);	/* HS_TX_TO_X16 */
+	r = FLD_MOD(r, x4, 29, 29);	/* HS_TX_TO_X8 (4 really) */
+	r = FLD_MOD(r, ticks, 28, 16);	/* HS_TX_TO_COUNTER */
+	dsi_write_reg(DSI_TIMING2, r);
+
+	DSSDBG("HS_TX_TO %lu ns (%#lx ticks%s%s)\n",
+			(ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
+			(fck / 1000 / 1000),
+			ticks, x4 ? " x4" : "", x16 ? " x16" : "");
+}
+
+static int dsi_proto_config(struct omap_dss_device *dssdev)
+{
+	u32 r;
+	int buswidth = 0;
+
+	dsi_config_tx_fifo(DSI_FIFO_SIZE_128,
+			DSI_FIFO_SIZE_0,
+			DSI_FIFO_SIZE_0,
+			DSI_FIFO_SIZE_0);
+
+	dsi_config_rx_fifo(DSI_FIFO_SIZE_128,
+			DSI_FIFO_SIZE_0,
+			DSI_FIFO_SIZE_0,
+			DSI_FIFO_SIZE_0);
+
+	/* XXX what values for the timeouts? */
+	dsi_set_stop_state_counter(1000);
+	dsi_set_ta_timeout(6400000);
+	dsi_set_lp_rx_timeout(48000);
+	dsi_set_hs_tx_timeout(1000000);
+
+	switch (dssdev->ctrl.pixel_size) {
+	case 16:
+		buswidth = 0;
+		break;
+	case 18:
+		buswidth = 1;
+		break;
+	case 24:
+		buswidth = 2;
+		break;
+	default:
+		BUG();
+	}
+
+	r = dsi_read_reg(DSI_CTRL);
+	r = FLD_MOD(r, 1, 1, 1);	/* CS_RX_EN */
+	r = FLD_MOD(r, 1, 2, 2);	/* ECC_RX_EN */
+	r = FLD_MOD(r, 1, 3, 3);	/* TX_FIFO_ARBITRATION */
+	r = FLD_MOD(r, 1, 4, 4);	/* VP_CLK_RATIO, always 1, see errata */
+	r = FLD_MOD(r, buswidth, 7, 6); /* VP_DATA_BUS_WIDTH */
+	r = FLD_MOD(r, 0, 8, 8);	/* VP_CLK_POL */
+	r = FLD_MOD(r, 2, 13, 12);	/* LINE_BUFFER, 2 lines */
+	r = FLD_MOD(r, 1, 14, 14);	/* TRIGGER_RESET_MODE */
+	r = FLD_MOD(r, 1, 19, 19);	/* EOT_ENABLE */
+	r = FLD_MOD(r, 1, 24, 24);	/* DCS_CMD_ENABLE */
+	r = FLD_MOD(r, 0, 25, 25);	/* DCS_CMD_CODE, 1=start, 0=continue */
+
+	dsi_write_reg(DSI_CTRL, r);
+
+	dsi_vc_initial_config(0);
+
+	/* set all vc targets to peripheral 0 */
+	dsi.vc[0].dest_per = 0;
+	dsi.vc[1].dest_per = 0;
+	dsi.vc[2].dest_per = 0;
+	dsi.vc[3].dest_per = 0;
+
+	return 0;
+}
+
+static void dsi_proto_timings(struct omap_dss_device *dssdev)
+{
+	unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail;
+	unsigned tclk_pre, tclk_post;
+	unsigned ths_prepare, ths_prepare_ths_zero, ths_zero;
+	unsigned ths_trail, ths_exit;
+	unsigned ddr_clk_pre, ddr_clk_post;
+	unsigned enter_hs_mode_lat, exit_hs_mode_lat;
+	unsigned ths_eot;
+	u32 r;
+
+	r = dsi_read_reg(DSI_DSIPHY_CFG0);
+	ths_prepare = FLD_GET(r, 31, 24);
+	ths_prepare_ths_zero = FLD_GET(r, 23, 16);
+	ths_zero = ths_prepare_ths_zero - ths_prepare;
+	ths_trail = FLD_GET(r, 15, 8);
+	ths_exit = FLD_GET(r, 7, 0);
+
+	r = dsi_read_reg(DSI_DSIPHY_CFG1);
+	tlpx = FLD_GET(r, 22, 16) * 2;
+	tclk_trail = FLD_GET(r, 15, 8);
+	tclk_zero = FLD_GET(r, 7, 0);
+
+	r = dsi_read_reg(DSI_DSIPHY_CFG2);
+	tclk_prepare = FLD_GET(r, 7, 0);
+
+	/* min 8*UI */
+	tclk_pre = 20;
+	/* min 60ns + 52*UI */
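+	/* (one DDR clock spans 2 UI, so 52*UI is 52 / 2 = 26 DDR clocks,
+	 * assuming ns2ddr() returns DDR clock cycles) */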
+	tclk_post = ns2ddr(60) + 26;
+
+	/* ths_eot is 2 for 2 datalanes and 4 for 1 datalane */
+	if (dssdev->phy.dsi.data1_lane != 0 &&
+			dssdev->phy.dsi.data2_lane != 0)
+		ths_eot = 2;
+	else
+		ths_eot = 4;
+
+	ddr_clk_pre = DIV_ROUND_UP(tclk_pre + tlpx + tclk_zero + tclk_prepare,
+			4);
+	ddr_clk_post = DIV_ROUND_UP(tclk_post + ths_trail, 4) + ths_eot;
+
+	BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255);
+	BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255);
+
+	r = dsi_read_reg(DSI_CLK_TIMING);
+	r = FLD_MOD(r, ddr_clk_pre, 15, 8);
+	r = FLD_MOD(r, ddr_clk_post, 7, 0);
+	dsi_write_reg(DSI_CLK_TIMING, r);
+
+	DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n",
+			ddr_clk_pre,
+			ddr_clk_post);
+
+	enter_hs_mode_lat = 1 + DIV_ROUND_UP(tlpx, 4) +
+		DIV_ROUND_UP(ths_prepare, 4) +
+		DIV_ROUND_UP(ths_zero + 3, 4);
+
+	exit_hs_mode_lat = DIV_ROUND_UP(ths_trail + ths_exit, 4) + 1 + ths_eot;
+
+	r = FLD_VAL(enter_hs_mode_lat, 31, 16) |
+		FLD_VAL(exit_hs_mode_lat, 15, 0);
+	dsi_write_reg(DSI_VM_TIMING7, r);
+
+	DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n",
+			enter_hs_mode_lat, exit_hs_mode_lat);
+}
+
+
+#define DSI_DECL_VARS \
+	int __dsi_cb = 0; u32 __dsi_cv = 0;
+
+#define DSI_FLUSH(ch) \
+	if (__dsi_cb > 0) { \
+		/*DSSDBG("sending long packet %#010x\n", __dsi_cv);*/ \
+		dsi_write_reg(DSI_VC_LONG_PACKET_PAYLOAD(ch), __dsi_cv); \
+		__dsi_cb = __dsi_cv = 0; \
+	}
+
+#define DSI_PUSH(ch, data) \
+	do { \
+		__dsi_cv |= (data) << (__dsi_cb * 8); \
+		/*DSSDBG("cv = %#010x, cb = %d\n", __dsi_cv, __dsi_cb);*/ \
+		if (++__dsi_cb > 3) \
+			DSI_FLUSH(ch); \
+	} while (0)
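+
+/* Illustrative: pushing the bytes 0x11, 0x22, 0x33, 0x44 with DSI_PUSH()
+ * accumulates __dsi_cv = 0x44332211 (bytes are packed little-endian), and
+ * the fourth push triggers DSI_FLUSH(), which writes the word to the FIFO. */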
+
+static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
+			int x, int y, int w, int h)
+{
+	/* Note: supports only 24bit colors in 32bit container */
+	int first = 1;
+	int fifo_stalls = 0;
+	int max_dsi_packet_size;
+	int max_data_per_packet;
+	int max_pixels_per_packet;
+	int pixels_left;
+	int bytespp = dssdev->ctrl.pixel_size / 8;
+	int scr_width;
+	u32 __iomem *data;
+	int start_offset;
+	int horiz_inc;
+	int current_x;
+	struct omap_overlay *ovl;
+
+	debug_irq = 0;
+
+	DSSDBG("dsi_update_screen_l4 (%d,%d %dx%d)\n",
+			x, y, w, h);
+
+	ovl = dssdev->manager->overlays[0];
+
+	if (ovl->info.color_mode != OMAP_DSS_COLOR_RGB24U)
+		return -EINVAL;
+
+	if (dssdev->ctrl.pixel_size != 24)
+		return -EINVAL;
+
+	scr_width = ovl->info.screen_width;
+	data = ovl->info.vaddr;
+
+	start_offset = scr_width * y + x;
+	horiz_inc = scr_width - w;
+	current_x = x;
+
+	/* We need header(4) + DCSCMD(1) + pixels(numpix*bytespp) bytes
+	 * in fifo */
+
+	/* When using CPU, max long packet size is TX buffer size */
+	max_dsi_packet_size = dsi.vc[0].fifo_size * 32 * 4;
+
+	/* We seem to get better performance if we divide the TX FIFO in
+	 * half: while one half is being sent, we fill the other half.
+	 * max_dsi_packet_size /= 2; */
+
+	max_data_per_packet = max_dsi_packet_size - 4 - 1;
+
+	max_pixels_per_packet = max_data_per_packet / bytespp;
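+	/* For example (assuming fifo_size here works out to a 512 byte
+	 * FIFO): 512 - 4 - 1 = 507 bytes of pixel data per packet, i.e.
+	 * 507 / 3 = 169 pixels at 24 bpp. */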
+
+	DSSDBG("max_pixels_per_packet %d\n", max_pixels_per_packet);
+
+	pixels_left = w * h;
+
+	DSSDBG("total pixels %d\n", pixels_left);
+
+	data += start_offset;
+
+	while (pixels_left > 0) {
+		/* 0x2c = write_memory_start */
+		/* 0x3c = write_memory_continue */
+		u8 dcs_cmd = first ? 0x2c : 0x3c;
+		int pixels;
+		DSI_DECL_VARS;
+		first = 0;
+
+#if 1
+		/* using fifo not empty */
+		/* TX_FIFO_NOT_EMPTY */
+		while (FLD_GET(dsi_read_reg(DSI_VC_CTRL(0)), 5, 5)) {
+			udelay(1);
+			fifo_stalls++;
+			if (fifo_stalls > 0xfffff) {
+				DSSERR("fifo stalls overflow, pixels left %d\n",
+						pixels_left);
+				dsi_if_enable(0);
+				return -EIO;
+			}
+		}
+#elif 1
+		/* using fifo emptiness */
+		while ((REG_GET(DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 <
+				max_dsi_packet_size) {
+			fifo_stalls++;
+			if (fifo_stalls > 0xfffff) {
+				DSSERR("fifo stalls overflow, pixels left %d\n",
+					       pixels_left);
+				dsi_if_enable(0);
+				return -EIO;
+			}
+		}
+#else
+		while ((REG_GET(DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 == 0) {
+			fifo_stalls++;
+			if (fifo_stalls > 0xfffff) {
+				DSSERR("fifo stalls overflow, pixels left %d\n",
+					       pixels_left);
+				dsi_if_enable(0);
+				return -EIO;
+			}
+		}
+#endif
+		pixels = min(max_pixels_per_packet, pixels_left);
+
+		pixels_left -= pixels;
+
+		dsi_vc_write_long_header(0, DSI_DT_DCS_LONG_WRITE,
+				1 + pixels * bytespp, 0);
+
+		DSI_PUSH(0, dcs_cmd);
+
+		while (pixels-- > 0) {
+			u32 pix = __raw_readl(data++);
+
+			DSI_PUSH(0, (pix >> 16) & 0xff);
+			DSI_PUSH(0, (pix >> 8) & 0xff);
+			DSI_PUSH(0, (pix >> 0) & 0xff);
+
+			current_x++;
+			if (current_x == x+w) {
+				current_x = x;
+				data += horiz_inc;
+			}
+		}
+
+		DSI_FLUSH(0);
+	}
+
+	return 0;
+}
+
+static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
+		u16 x, u16 y, u16 w, u16 h)
+{
+	unsigned bytespp;
+	unsigned bytespl;
+	unsigned bytespf;
+	unsigned total_len;
+	unsigned packet_payload;
+	unsigned packet_len;
+	u32 l;
+	bool use_te_trigger;
+	const unsigned channel = 0;
+	/* line buffer is 1024 x 24bits */
+	/* XXX: for some reason using full buffer size causes considerable TX
+	 * slowdown with update sizes that fill the whole buffer */
+	const unsigned line_buf_size = 1023 * 3;
+
+	use_te_trigger = dsi.te_enabled && !dsi.use_ext_te;
+
+	if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO)
+		DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n",
+				x, y, w, h);
+
+	bytespp	= dssdev->ctrl.pixel_size / 8;
+	bytespl = w * bytespp;
+	bytespf = bytespl * h;
+
+	/* NOTE: packet_payload has to be equal to N * bytespl, where N is
+	 * number of lines in a packet.  See errata about VP_CLK_RATIO */
+
+	if (bytespf < line_buf_size)
+		packet_payload = bytespf;
+	else
+		packet_payload = (line_buf_size) / bytespl * bytespl;
+
+	packet_len = packet_payload + 1;	/* 1 byte for DCS cmd */
+	total_len = (bytespf / packet_payload) * packet_len;
+
+	if (bytespf % packet_payload)
+		total_len += (bytespf % packet_payload) + 1;
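+
+	/* Worked example (illustrative): a 480x640 update at 24 bpp gives
+	 * bytespl = 1440 and bytespf = 921600; packet_payload =
+	 * 3069 / 1440 * 1440 = 2880 (two full lines), packet_len = 2881 and
+	 * total_len = 320 * 2881 = 921920, with no remainder. */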
+
+	if (0)
+		dsi_vc_print_status(1);
+
+	l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
+	dsi_write_reg(DSI_VC_TE(channel), l);
+
+	dsi_vc_write_long_header(channel, DSI_DT_DCS_LONG_WRITE, packet_len, 0);
+
+	if (use_te_trigger)
+		l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
+	else
+		l = FLD_MOD(l, 1, 31, 31); /* TE_START */
+	dsi_write_reg(DSI_VC_TE(channel), l);
+
+	/* We put SIDLEMODE to no-idle for the duration of the transfer,
+	 * because DSS interrupts are not capable of waking up the CPU and the
+	 * framedone interrupt could be delayed for quite a long time. The
+	 * same presumably goes for any DSS interrupt, but for some reason the
+	 * problem has only been seen here.
+	 */
+	dispc_disable_sidle();
+
+	dss_start_update(dssdev);
+
+	if (use_te_trigger) {
+		/* Disable LP_RX_TO so that we can receive TE: the time to
+		 * wait for TE is longer than the timeout allows */
+		REG_FLD_MOD(DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
+
+		dsi_vc_send_bta(channel);
+
+#ifdef DSI_CATCH_MISSING_TE
+		mod_timer(&dsi.te_timer, jiffies + msecs_to_jiffies(250));
+#endif
+	}
+}
+
+#ifdef DSI_CATCH_MISSING_TE
+static void dsi_te_timeout(unsigned long arg)
+{
+	DSSERR("TE not received for 250ms!\n");
+}
+#endif
+
+static void dsi_framedone_irq_callback(void *data, u32 mask)
+{
+	/* Note: We get FRAMEDONE when DISPC has finished sending pixels and
+	 * turns itself off. However, DSI still has the pixels in its buffers,
+	 * and is sending the data.
+	 */
+
+	/* SIDLEMODE back to smart-idle */
+	dispc_enable_sidle();
+
+	dsi.framedone_received = true;
+	wake_up(&dsi.waitqueue);
+}
+
+static void dsi_set_update_region(struct omap_dss_device *dssdev,
+		u16 x, u16 y, u16 w, u16 h)
+{
+	spin_lock(&dsi.update_lock);
+	if (dsi.update_region.dirty) {
+		dsi.update_region.x = min(x, dsi.update_region.x);
+		dsi.update_region.y = min(y, dsi.update_region.y);
+		dsi.update_region.w = max(w, dsi.update_region.w);
+		dsi.update_region.h = max(h, dsi.update_region.h);
+	} else {
+		dsi.update_region.x = x;
+		dsi.update_region.y = y;
+		dsi.update_region.w = w;
+		dsi.update_region.h = h;
+	}
+
+	dsi.update_region.device = dssdev;
+	dsi.update_region.dirty = true;
+
+	spin_unlock(&dsi.update_lock);
+
+}
+
+static int dsi_set_update_mode(struct omap_dss_device *dssdev,
+		enum omap_dss_update_mode mode)
+{
+	int r = 0;
+	int i;
+
+	WARN_ON(!mutex_is_locked(&dsi.bus_lock));
+
+	if (dsi.update_mode != mode) {
+		dsi.update_mode = mode;
+
+		/* Mark the overlays dirty, and do apply(), so that we get the
+		 * overlays configured properly after update mode change. */
+		for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+			struct omap_overlay *ovl;
+			ovl = omap_dss_get_overlay(i);
+			if (ovl->manager == dssdev->manager)
+				ovl->info_dirty = true;
+		}
+
+		r = dssdev->manager->apply(dssdev->manager);
+
+		if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE &&
+				mode == OMAP_DSS_UPDATE_AUTO) {
+			u16 w, h;
+
+			DSSDBG("starting auto update\n");
+
+			dssdev->get_resolution(dssdev, &w, &h);
+
+			dsi_set_update_region(dssdev, 0, 0, w, h);
+
+			dsi_perf_mark_start_auto();
+
+			wake_up(&dsi.waitqueue);
+		}
+	}
+
+	return r;
+}
+
+static int dsi_set_te(struct omap_dss_device *dssdev, bool enable)
+{
+	int r;
+	r = dssdev->driver->enable_te(dssdev, enable);
+	/* XXX for some reason, DSI TE breaks if we don't wait here.
+	 * Panel bug? Needs more study */
+	msleep(100);
+	return r;
+}
+
+static void dsi_handle_framedone(void)
+{
+	int r;
+	const int channel = 0;
+	bool use_te_trigger;
+
+	use_te_trigger = dsi.te_enabled && !dsi.use_ext_te;
+
+	if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO)
+		DSSDBG("FRAMEDONE\n");
+
+	if (use_te_trigger) {
+		/* enable LP_RX_TO again after the TE */
+		REG_FLD_MOD(DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
+	}
+
+	/* Send BTA after the frame. We need this for TE to work, as a TE
+	 * trigger is only sent for a BTA without a preceding packet. Thus we
+	 * need to send a BTA after the pixel packets so that the next BTA
+	 * will cause a TE trigger.
+	 *
+	 * This is not needed when TE is not in use, but we do it anyway to
+	 * make sure that the transfer has been completed. It would be more
+	 * optimal, but more complex, to wait only just before starting the
+	 * next transfer. */
+	r = dsi_vc_send_bta_sync(channel);
+	if (r)
+		DSSERR("BTA after framedone failed\n");
+
+	/* RX_FIFO_NOT_EMPTY */
+	if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
+		DSSERR("Received error during frame transfer:\n");
+		dsi_vc_flush_receive_data(0);
+	}
+
+#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
+	dispc_fake_vsync_irq();
+#endif
+}
+
+static int dsi_update_thread(void *data)
+{
+	unsigned long timeout;
+	struct omap_dss_device *device;
+	u16 x, y, w, h;
+
+	while (1) {
+		bool sched;
+
+		wait_event_interruptible(dsi.waitqueue,
+				dsi.update_mode == OMAP_DSS_UPDATE_AUTO ||
+				(dsi.update_mode == OMAP_DSS_UPDATE_MANUAL &&
+				 dsi.update_region.dirty == true) ||
+				kthread_should_stop());
+
+		if (kthread_should_stop())
+			break;
+
+		dsi_bus_lock();
+
+		if (dsi.update_mode == OMAP_DSS_UPDATE_DISABLED ||
+				kthread_should_stop()) {
+			dsi_bus_unlock();
+			break;
+		}
+
+		dsi_perf_mark_setup();
+
+		if (dsi.update_region.dirty) {
+			spin_lock(&dsi.update_lock);
+			dsi.active_update_region = dsi.update_region;
+			dsi.update_region.dirty = false;
+			spin_unlock(&dsi.update_lock);
+		}
+
+		device = dsi.active_update_region.device;
+		x = dsi.active_update_region.x;
+		y = dsi.active_update_region.y;
+		w = dsi.active_update_region.w;
+		h = dsi.active_update_region.h;
+
+		if (device->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
+
+			if (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL)
+				dss_setup_partial_planes(device,
+						&x, &y, &w, &h);
+
+			dispc_set_lcd_size(w, h);
+		}
+
+		if (dsi.active_update_region.dirty) {
+			dsi.active_update_region.dirty = false;
+			/* XXX TODO: we don't need to send the coords if they
+			 * are the same as those already programmed into the
+			 * panel. That should speed up manual update a bit. */
+			device->driver->setup_update(device, x, y, w, h);
+		}
+
+		dsi_perf_mark_start();
+
+		if (device->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
+			dsi_vc_config_vp(0);
+
+			if (dsi.te_enabled && dsi.use_ext_te)
+				device->driver->wait_for_te(device);
+
+			dsi.framedone_received = false;
+
+			dsi_update_screen_dispc(device, x, y, w, h);
+
+			/* wait for framedone */
+			timeout = msecs_to_jiffies(1000);
+			wait_event_timeout(dsi.waitqueue,
+					dsi.framedone_received == true,
+					timeout);
+
+			if (!dsi.framedone_received) {
+				DSSERR("framedone timeout\n");
+				DSSERR("failed update %d,%d %dx%d\n",
+						x, y, w, h);
+
+				dispc_enable_sidle();
+				dispc_enable_lcd_out(0);
+
+				dsi_reset_tx_fifo(0);
+			} else {
+				dsi_handle_framedone();
+				dsi_perf_show("DISPC");
+			}
+		} else {
+			dsi_update_screen_l4(device, x, y, w, h);
+			dsi_perf_show("L4");
+		}
+
+		sched = atomic_read(&dsi.bus_lock.count) < 0;
+
+		complete_all(&dsi.update_completion);
+
+		dsi_bus_unlock();
+
+		/* XXX We need to give others chance to get the bus lock. Is
+		 * there a better way for this? */
+		if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO && sched)
+			schedule_timeout_interruptible(1);
+	}
+
+	DSSDBG("update thread exiting\n");
+
+	return 0;
+}
+
+
+
+/* Display funcs */
+
+static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
+{
+	int r;
+
+	r = omap_dispc_register_isr(dsi_framedone_irq_callback, NULL,
+			DISPC_IRQ_FRAMEDONE);
+	if (r) {
+		DSSERR("can't get FRAMEDONE irq\n");
+		return r;
+	}
+
+	dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT);
+
+	dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_DSI);
+	dispc_enable_fifohandcheck(1);
+
+	dispc_set_tft_data_lines(dssdev->ctrl.pixel_size);
+
+	{
+		struct omap_video_timings timings = {
+			.hsw		= 1,
+			.hfp		= 1,
+			.hbp		= 1,
+			.vsw		= 1,
+			.vfp		= 0,
+			.vbp		= 0,
+		};
+
+		dispc_set_lcd_timings(&timings);
+	}
+
+	return 0;
+}
+
+static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev)
+{
+	omap_dispc_unregister_isr(dsi_framedone_irq_callback, NULL,
+			DISPC_IRQ_FRAMEDONE);
+}
+
+static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
+{
+	struct dsi_clock_info cinfo;
+	int r;
+
+	/* we always use DSS2_FCK as input clock */
+	cinfo.use_dss2_fck = true;
+	cinfo.regn  = dssdev->phy.dsi.div.regn;
+	cinfo.regm  = dssdev->phy.dsi.div.regm;
+	cinfo.regm3 = dssdev->phy.dsi.div.regm3;
+	cinfo.regm4 = dssdev->phy.dsi.div.regm4;
+	r = dsi_calc_clock_rates(&cinfo);
+	if (r)
+		return r;
+
+	r = dsi_pll_set_clock_div(&cinfo);
+	if (r) {
+		DSSERR("Failed to set dsi clocks\n");
+		return r;
+	}
+
+	return 0;
+}
+
+static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
+{
+	struct dispc_clock_info dispc_cinfo;
+	int r;
+	unsigned long long fck;
+
+	fck = dsi_get_dsi1_pll_rate();
+
+	dispc_cinfo.lck_div = dssdev->phy.dsi.div.lck_div;
+	dispc_cinfo.pck_div = dssdev->phy.dsi.div.pck_div;
+
+	r = dispc_calc_clock_rates(fck, &dispc_cinfo);
+	if (r) {
+		DSSERR("Failed to calc dispc clocks\n");
+		return r;
+	}
+
+	r = dispc_set_clock_div(&dispc_cinfo);
+	if (r) {
+		DSSERR("Failed to set dispc clocks\n");
+		return r;
+	}
+
+	return 0;
+}
+
+static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
+{
+	int r;
+
+	_dsi_print_reset_status();
+
+	r = dsi_pll_init(dssdev, true, true);
+	if (r)
+		goto err0;
+
+	r = dsi_configure_dsi_clocks(dssdev);
+	if (r)
+		goto err1;
+
+	dss_select_clk_source(true, true);
+
+	DSSDBG("PLL OK\n");
+
+	r = dsi_configure_dispc_clocks(dssdev);
+	if (r)
+		goto err2;
+
+	r = dsi_complexio_init(dssdev);
+	if (r)
+		goto err2;
+
+	_dsi_print_reset_status();
+
+	dsi_proto_timings(dssdev);
+	dsi_set_lp_clk_divisor(dssdev);
+
+	if (1)
+		_dsi_print_reset_status();
+
+	r = dsi_proto_config(dssdev);
+	if (r)
+		goto err3;
+
+	/* enable interface */
+	dsi_vc_enable(0, 1);
+	dsi_if_enable(1);
+	dsi_force_tx_stop_mode_io();
+
+	if (dssdev->driver->enable) {
+		r = dssdev->driver->enable(dssdev);
+		if (r)
+			goto err4;
+	}
+
+	/* enable high-speed after initial config */
+	dsi_vc_enable_hs(0, 1);
+
+	return 0;
+err4:
+	dsi_if_enable(0);
+err3:
+	dsi_complexio_uninit();
+err2:
+	dss_select_clk_source(false, false);
+err1:
+	dsi_pll_uninit();
+err0:
+	return r;
+}
+
+static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev)
+{
+	if (dssdev->driver->disable)
+		dssdev->driver->disable(dssdev);
+
+	dss_select_clk_source(false, false);
+	dsi_complexio_uninit();
+	dsi_pll_uninit();
+}
+
+static int dsi_core_init(void)
+{
+	/* Autoidle */
+	REG_FLD_MOD(DSI_SYSCONFIG, 1, 0, 0);
+
+	/* ENWAKEUP */
+	REG_FLD_MOD(DSI_SYSCONFIG, 1, 2, 2);
+
+	/* SIDLEMODE smart-idle */
+	REG_FLD_MOD(DSI_SYSCONFIG, 2, 4, 3);
+
+	_dsi_initialize_irq();
+
+	return 0;
+}
+
+static int dsi_display_enable(struct omap_dss_device *dssdev)
+{
+	int r = 0;
+
+	DSSDBG("dsi_display_enable\n");
+
+	mutex_lock(&dsi.lock);
+	dsi_bus_lock();
+
+	r = omap_dss_start_device(dssdev);
+	if (r) {
+		DSSERR("failed to start device\n");
+		goto err0;
+	}
+
+	if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
+		DSSERR("dssdev already enabled\n");
+		r = -EINVAL;
+		goto err1;
+	}
+
+	enable_clocks(1);
+	dsi_enable_pll_clock(1);
+
+	r = _dsi_reset();
+	if (r)
+		goto err2;
+
+	dsi_core_init();
+
+	r = dsi_display_init_dispc(dssdev);
+	if (r)
+		goto err2;
+
+	r = dsi_display_init_dsi(dssdev);
+	if (r)
+		goto err3;
+
+	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+	dsi.use_ext_te = dssdev->phy.dsi.ext_te;
+	r = dsi_set_te(dssdev, dsi.te_enabled);
+	if (r)
+		goto err4;
+
+	dsi_set_update_mode(dssdev, dsi.user_update_mode);
+
+	dsi_bus_unlock();
+	mutex_unlock(&dsi.lock);
+
+	return 0;
+
+err4:
+	dsi_display_uninit_dsi(dssdev);
+err3:
+	dsi_display_uninit_dispc(dssdev);
+err2:
+	enable_clocks(0);
+	dsi_enable_pll_clock(0);
+err1:
+	omap_dss_stop_device(dssdev);
+err0:
+	dsi_bus_unlock();
+	mutex_unlock(&dsi.lock);
+	DSSDBG("dsi_display_enable FAILED\n");
+	return r;
+}
+
+static void dsi_display_disable(struct omap_dss_device *dssdev)
+{
+	DSSDBG("dsi_display_disable\n");
+
+	mutex_lock(&dsi.lock);
+	dsi_bus_lock();
+
+	if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED ||
+			dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
+		goto end;
+
+	dsi.update_mode = OMAP_DSS_UPDATE_DISABLED;
+	dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+
+	dsi_display_uninit_dispc(dssdev);
+
+	dsi_display_uninit_dsi(dssdev);
+
+	enable_clocks(0);
+	dsi_enable_pll_clock(0);
+
+	omap_dss_stop_device(dssdev);
+end:
+	dsi_bus_unlock();
+	mutex_unlock(&dsi.lock);
+}
+
+static int dsi_display_suspend(struct omap_dss_device *dssdev)
+{
+	DSSDBG("dsi_display_suspend\n");
+
+	mutex_lock(&dsi.lock);
+	dsi_bus_lock();
+
+	if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED ||
+			dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
+		goto end;
+
+	dsi.update_mode = OMAP_DSS_UPDATE_DISABLED;
+	dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+
+	dsi_display_uninit_dispc(dssdev);
+
+	dsi_display_uninit_dsi(dssdev);
+
+	enable_clocks(0);
+	dsi_enable_pll_clock(0);
+end:
+	dsi_bus_unlock();
+	mutex_unlock(&dsi.lock);
+
+	return 0;
+}
+
+static int dsi_display_resume(struct omap_dss_device *dssdev)
+{
+	int r;
+
+	DSSDBG("dsi_display_resume\n");
+
+	mutex_lock(&dsi.lock);
+	dsi_bus_lock();
+
+	if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
+		DSSERR("dssdev not suspended\n");
+		r = -EINVAL;
+		goto err0;
+	}
+
+	enable_clocks(1);
+	dsi_enable_pll_clock(1);
+
+	r = _dsi_reset();
+	if (r)
+		goto err1;
+
+	dsi_core_init();
+
+	r = dsi_display_init_dispc(dssdev);
+	if (r)
+		goto err1;
+
+	r = dsi_display_init_dsi(dssdev);
+	if (r)
+		goto err2;
+
+	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+	r = dsi_set_te(dssdev, dsi.te_enabled);
+	if (r)
+		goto err2;
+
+	dsi_set_update_mode(dssdev, dsi.user_update_mode);
+
+	dsi_bus_unlock();
+	mutex_unlock(&dsi.lock);
+
+	return 0;
+
+err2:
+	dsi_display_uninit_dispc(dssdev);
+err1:
+	enable_clocks(0);
+	dsi_enable_pll_clock(0);
+err0:
+	dsi_bus_unlock();
+	mutex_unlock(&dsi.lock);
+	DSSDBG("dsi_display_resume FAILED\n");
+	return r;
+}
+
+static int dsi_display_update(struct omap_dss_device *dssdev,
+			u16 x, u16 y, u16 w, u16 h)
+{
+	int r = 0;
+	u16 dw, dh;
+
+	DSSDBG("dsi_display_update(%d,%d %dx%d)\n", x, y, w, h);
+
+	mutex_lock(&dsi.lock);
+
+	if (dsi.update_mode != OMAP_DSS_UPDATE_MANUAL)
+		goto end;
+
+	if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+		goto end;
+
+	dssdev->get_resolution(dssdev, &dw, &dh);
+
+	if  (x > dw || y > dh)
+		goto end;
+
+	if (x + w > dw)
+		w = dw - x;
+
+	if (y + h > dh)
+		h = dh - y;
+
+	if (w == 0 || h == 0)
+		goto end;
+
+	if (w == 1) {
+		r = -EINVAL;
+		goto end;
+	}
+
+	dsi_set_update_region(dssdev, x, y, w, h);
+
+	wake_up(&dsi.waitqueue);
+
+end:
+	mutex_unlock(&dsi.lock);
+
+	return r;
+}
+
+static int dsi_display_sync(struct omap_dss_device *dssdev)
+{
+	bool wait;
+
+	DSSDBG("dsi_display_sync()\n");
+
+	mutex_lock(&dsi.lock);
+	dsi_bus_lock();
+
+	if (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL &&
+			dsi.update_region.dirty) {
+		INIT_COMPLETION(dsi.update_completion);
+		wait = true;
+	} else {
+		wait = false;
+	}
+
+	dsi_bus_unlock();
+	mutex_unlock(&dsi.lock);
+
+	if (wait)
+		wait_for_completion_interruptible(&dsi.update_completion);
+
+	DSSDBG("dsi_display_sync() done\n");
+	return 0;
+}
+
+static int dsi_display_set_update_mode(struct omap_dss_device *dssdev,
+		enum omap_dss_update_mode mode)
+{
+	int r = 0;
+
+	DSSDBGF("%d", mode);
+
+	mutex_lock(&dsi.lock);
+	dsi_bus_lock();
+
+	dsi.user_update_mode = mode;
+	r = dsi_set_update_mode(dssdev, mode);
+
+	dsi_bus_unlock();
+	mutex_unlock(&dsi.lock);
+
+	return r;
+}
+
+static enum omap_dss_update_mode dsi_display_get_update_mode(
+		struct omap_dss_device *dssdev)
+{
+	return dsi.update_mode;
+}
+
+
+static int dsi_display_enable_te(struct omap_dss_device *dssdev, bool enable)
+{
+	int r = 0;
+
+	DSSDBGF("%d", enable);
+
+	if (!dssdev->driver->enable_te)
+		return -ENOENT;
+
+	dsi_bus_lock();
+
+	dsi.te_enabled = enable;
+
+	if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+		goto end;
+
+	r = dsi_set_te(dssdev, enable);
+end:
+	dsi_bus_unlock();
+
+	return r;
+}
+
+static int dsi_display_get_te(struct omap_dss_device *dssdev)
+{
+	return dsi.te_enabled;
+}
+
+static int dsi_display_set_rotate(struct omap_dss_device *dssdev, u8 rotate)
+{
+
+	DSSDBGF("%d", rotate);
+
+	if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
+		return -EINVAL;
+
+	dsi_bus_lock();
+	dssdev->driver->set_rotate(dssdev, rotate);
+	if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO) {
+		u16 w, h;
+		/* the display dimensions may have changed, so set a new
+		 * update region */
+		dssdev->get_resolution(dssdev, &w, &h);
+		dsi_set_update_region(dssdev, 0, 0, w, h);
+	}
+	dsi_bus_unlock();
+
+	return 0;
+}
+
+static u8 dsi_display_get_rotate(struct omap_dss_device *dssdev)
+{
+	if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
+		return 0;
+
+	return dssdev->driver->get_rotate(dssdev);
+}
+
+static int dsi_display_set_mirror(struct omap_dss_device *dssdev, bool mirror)
+{
+	DSSDBGF("%d", mirror);
+
+	if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror)
+		return -EINVAL;
+
+	dsi_bus_lock();
+	dssdev->driver->set_mirror(dssdev, mirror);
+	dsi_bus_unlock();
+
+	return 0;
+}
+
+static bool dsi_display_get_mirror(struct omap_dss_device *dssdev)
+{
+	if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror)
+		return 0;
+
+	return dssdev->driver->get_mirror(dssdev);
+}
+
+static int dsi_display_run_test(struct omap_dss_device *dssdev, int test_num)
+{
+	int r = 0;	/* run_test may be NULL, in which case r stays 0 */
+
+	if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+		return -EIO;
+
+	DSSDBGF("%d", test_num);
+
+	dsi_bus_lock();
+
+	/* run test first in low speed mode */
+	dsi_vc_enable_hs(0, 0);
+
+	if (dssdev->driver->run_test) {
+		r = dssdev->driver->run_test(dssdev, test_num);
+		if (r)
+			goto end;
+	}
+
+	/* then in high speed */
+	dsi_vc_enable_hs(0, 1);
+
+	if (dssdev->driver->run_test) {
+		r = dssdev->driver->run_test(dssdev, test_num);
+		if (r)
+			goto end;
+	}
+
+end:
+	dsi_vc_enable_hs(0, 1);
+
+	dsi_bus_unlock();
+
+	return r;
+}
+
+static int dsi_display_memory_read(struct omap_dss_device *dssdev,
+		void *buf, size_t size,
+		u16 x, u16 y, u16 w, u16 h)
+{
+	int r;
+
+	DSSDBGF("");
+
+	if (!dssdev->driver->memory_read)
+		return -EINVAL;
+
+	if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+		return -EIO;
+
+	dsi_bus_lock();
+
+	r = dssdev->driver->memory_read(dssdev, buf, size,
+			x, y, w, h);
+
+	/* Memory read usually changes the update area. This will
+	 * force the next update to re-set the update area */
+	dsi.active_update_region.dirty = true;
+
+	dsi_bus_unlock();
+
+	return r;
+}
+
+void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
+		u32 fifo_size, enum omap_burst_size *burst_size,
+		u32 *fifo_low, u32 *fifo_high)
+{
+	unsigned burst_size_bytes;
+
+	*burst_size = OMAP_DSS_BURST_16x32;
+	burst_size_bytes = 16 * 32 / 8;
+
+	*fifo_high = fifo_size - burst_size_bytes;
+	*fifo_low = fifo_size - burst_size_bytes * 8;
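+
+	/* E.g. with a (hypothetical) 1024 byte FIFO: the burst is
+	 * 16 * 32 / 8 = 64 bytes, so fifo_high = 960 and fifo_low = 512. */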
+}
+
+int dsi_init_display(struct omap_dss_device *dssdev)
+{
+	DSSDBG("DSI init\n");
+
+	dssdev->enable = dsi_display_enable;
+	dssdev->disable = dsi_display_disable;
+	dssdev->suspend = dsi_display_suspend;
+	dssdev->resume = dsi_display_resume;
+	dssdev->update = dsi_display_update;
+	dssdev->sync = dsi_display_sync;
+	dssdev->set_update_mode = dsi_display_set_update_mode;
+	dssdev->get_update_mode = dsi_display_get_update_mode;
+	dssdev->enable_te = dsi_display_enable_te;
+	dssdev->get_te = dsi_display_get_te;
+
+	dssdev->get_rotate = dsi_display_get_rotate;
+	dssdev->set_rotate = dsi_display_set_rotate;
+
+	dssdev->get_mirror = dsi_display_get_mirror;
+	dssdev->set_mirror = dsi_display_set_mirror;
+
+	dssdev->run_test = dsi_display_run_test;
+	dssdev->memory_read = dsi_display_memory_read;
+
+	/* XXX these should be figured out dynamically */
+	dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
+		OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
+
+	dsi.vc[0].dssdev = dssdev;
+	dsi.vc[1].dssdev = dssdev;
+
+	return 0;
+}
+
+int dsi_init(struct platform_device *pdev)
+{
+	u32 rev;
+	int r;
+	struct sched_param param = {
+		.sched_priority = MAX_USER_RT_PRIO-1
+	};
+
+	spin_lock_init(&dsi.errors_lock);
+	dsi.errors = 0;
+
+	init_completion(&dsi.bta_completion);
+	init_completion(&dsi.update_completion);
+
+	dsi.thread = kthread_create(dsi_update_thread, NULL, "dsi");
+	if (IS_ERR(dsi.thread)) {
+		DSSERR("cannot create kthread\n");
+		r = PTR_ERR(dsi.thread);
+		goto err0;
+	}
+	sched_setscheduler(dsi.thread, SCHED_FIFO, &param);
+
+	init_waitqueue_head(&dsi.waitqueue);
+	spin_lock_init(&dsi.update_lock);
+
+	mutex_init(&dsi.lock);
+	mutex_init(&dsi.bus_lock);
+
+#ifdef DSI_CATCH_MISSING_TE
+	init_timer(&dsi.te_timer);
+	dsi.te_timer.function = dsi_te_timeout;
+	dsi.te_timer.data = 0;
+#endif
+
+	dsi.update_mode = OMAP_DSS_UPDATE_DISABLED;
+	dsi.user_update_mode = OMAP_DSS_UPDATE_DISABLED;
+
+	dsi.base = ioremap(DSI_BASE, DSI_SZ_REGS);
+	if (!dsi.base) {
+		DSSERR("can't ioremap DSI\n");
+		r = -ENOMEM;
+		goto err1;
+	}
+
+	dsi.vdds_dsi_reg = regulator_get(&pdev->dev, "vdds_dsi");
+	if (IS_ERR(dsi.vdds_dsi_reg)) {
+		DSSERR("can't get VDDS_DSI regulator\n");
+		r = PTR_ERR(dsi.vdds_dsi_reg);
+		goto err2;	/* err2 unmaps dsi.base */
+	}
+
+	enable_clocks(1);
+
+	rev = dsi_read_reg(DSI_REVISION);
+	printk(KERN_INFO "OMAP DSI rev %d.%d\n",
+	       FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+
+	enable_clocks(0);
+
+	wake_up_process(dsi.thread);
+
+	return 0;
+err2:
+	iounmap(dsi.base);
+err1:
+	kthread_stop(dsi.thread);
+err0:
+	return r;
+}
+
+void dsi_exit(void)
+{
+	kthread_stop(dsi.thread);
+
+	regulator_put(dsi.vdds_dsi_reg);
+
+	iounmap(dsi.base);
+
+	DSSDBG("omap_dsi_exit\n");
+}
+
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
new file mode 100644
index 0000000..9b05ee6
--- /dev/null
+++ b/drivers/video/omap2/dss/dss.c
@@ -0,0 +1,596 @@
+/*
+ * linux/drivers/video/omap2/dss/dss.c
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "DSS"
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/seq_file.h>
+#include <linux/clk.h>
+
+#include <plat/display.h>
+#include "dss.h"
+
+#define DSS_BASE			0x48050000
+
+#define DSS_SZ_REGS			SZ_512
+
+struct dss_reg {
+	u16 idx;
+};
+
+#define DSS_REG(idx)			((const struct dss_reg) { idx })
+
+#define DSS_REVISION			DSS_REG(0x0000)
+#define DSS_SYSCONFIG			DSS_REG(0x0010)
+#define DSS_SYSSTATUS			DSS_REG(0x0014)
+#define DSS_IRQSTATUS			DSS_REG(0x0018)
+#define DSS_CONTROL			DSS_REG(0x0040)
+#define DSS_SDI_CONTROL			DSS_REG(0x0044)
+#define DSS_PLL_CONTROL			DSS_REG(0x0048)
+#define DSS_SDI_STATUS			DSS_REG(0x005C)
+
+#define REG_GET(idx, start, end) \
+	FLD_GET(dss_read_reg(idx), start, end)
+
+#define REG_FLD_MOD(idx, val, start, end) \
+	dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end))
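+
+/* E.g. REG_FLD_MOD(DSS_SYSCONFIG, 1, 0, 0) performs a read-modify-write that
+ * sets only bit 0 (AUTOIDLE) of DSS_SYSCONFIG, leaving other bits intact. */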
+
+static struct {
+	void __iomem    *base;
+
+	struct clk	*dpll4_m4_ck;
+
+	unsigned long	cache_req_pck;
+	unsigned long	cache_prate;
+	struct dss_clock_info cache_dss_cinfo;
+	struct dispc_clock_info cache_dispc_cinfo;
+
+	u32		ctx[DSS_SZ_REGS / sizeof(u32)];
+} dss;
+
+static int _omap_dss_wait_reset(void);
+
+static inline void dss_write_reg(const struct dss_reg idx, u32 val)
+{
+	__raw_writel(val, dss.base + idx.idx);
+}
+
+static inline u32 dss_read_reg(const struct dss_reg idx)
+{
+	return __raw_readl(dss.base + idx.idx);
+}
+
+#define SR(reg) \
+	dss.ctx[(DSS_##reg).idx / sizeof(u32)] = dss_read_reg(DSS_##reg)
+#define RR(reg) \
+	dss_write_reg(DSS_##reg, dss.ctx[(DSS_##reg).idx / sizeof(u32)])
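+
+/* E.g. SR(CONTROL) expands to
+ *   dss.ctx[(DSS_CONTROL).idx / sizeof(u32)] = dss_read_reg(DSS_CONTROL),
+ * i.e. each register is saved at its word offset in the context array. */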
+
+void dss_save_context(void)
+{
+	if (cpu_is_omap24xx())
+		return;
+
+	SR(SYSCONFIG);
+	SR(CONTROL);
+
+#ifdef CONFIG_OMAP2_DSS_SDI
+	SR(SDI_CONTROL);
+	SR(PLL_CONTROL);
+#endif
+}
+
+void dss_restore_context(void)
+{
+	if (_omap_dss_wait_reset())
+		DSSERR("DSS not coming out of reset after sleep\n");
+
+	RR(SYSCONFIG);
+	RR(CONTROL);
+
+#ifdef CONFIG_OMAP2_DSS_SDI
+	RR(SDI_CONTROL);
+	RR(PLL_CONTROL);
+#endif
+}
+
+#undef SR
+#undef RR
+
+void dss_sdi_init(u8 datapairs)
+{
+	u32 l;
+
+	BUG_ON(datapairs > 3 || datapairs < 1);
+
+	l = dss_read_reg(DSS_SDI_CONTROL);
+	l = FLD_MOD(l, 0xf, 19, 15);		/* SDI_PDIV */
+	l = FLD_MOD(l, datapairs - 1, 3, 2);	/* SDI_PRSEL */
+	l = FLD_MOD(l, 2, 1, 0);		/* SDI_BWSEL */
+	dss_write_reg(DSS_SDI_CONTROL, l);
+
+	l = dss_read_reg(DSS_PLL_CONTROL);
+	l = FLD_MOD(l, 0x7, 25, 22);	/* SDI_PLL_FREQSEL */
+	l = FLD_MOD(l, 0xb, 16, 11);	/* SDI_PLL_REGN */
+	l = FLD_MOD(l, 0xb4, 10, 1);	/* SDI_PLL_REGM */
+	dss_write_reg(DSS_PLL_CONTROL, l);
+}
+
+int dss_sdi_enable(void)
+{
+	unsigned long timeout;
+
+	dispc_pck_free_enable(1);
+
+	/* Reset SDI PLL */
+	REG_FLD_MOD(DSS_PLL_CONTROL, 1, 18, 18); /* SDI_PLL_SYSRESET */
+	udelay(1);	/* wait 2x PCLK */
+
+	/* Lock SDI PLL */
+	REG_FLD_MOD(DSS_PLL_CONTROL, 1, 28, 28); /* SDI_PLL_GOBIT */
+
+	/* Waiting for PLL lock request to complete */
+	timeout = jiffies + msecs_to_jiffies(500);
+	while (dss_read_reg(DSS_SDI_STATUS) & (1 << 6)) {
+		if (time_after_eq(jiffies, timeout)) {
+			DSSERR("PLL lock request timed out\n");
+			goto err1;
+		}
+	}
+
+	/* Clearing PLL_GO bit */
+	REG_FLD_MOD(DSS_PLL_CONTROL, 0, 28, 28);
+
+	/* Waiting for PLL to lock */
+	timeout = jiffies + msecs_to_jiffies(500);
+	while (!(dss_read_reg(DSS_SDI_STATUS) & (1 << 5))) {
+		if (time_after_eq(jiffies, timeout)) {
+			DSSERR("PLL lock timed out\n");
+			goto err1;
+		}
+	}
+
+	dispc_lcd_enable_signal(1);
+
+	/* Waiting for SDI reset to complete */
+	timeout = jiffies + msecs_to_jiffies(500);
+	while (!(dss_read_reg(DSS_SDI_STATUS) & (1 << 2))) {
+		if (time_after_eq(jiffies, timeout)) {
+			DSSERR("SDI reset timed out\n");
+			goto err2;
+		}
+	}
+
+	return 0;
+
+ err2:
+	dispc_lcd_enable_signal(0);
+ err1:
+	/* Reset SDI PLL */
+	REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
+
+	dispc_pck_free_enable(0);
+
+	return -ETIMEDOUT;
+}
+
+void dss_sdi_disable(void)
+{
+	dispc_lcd_enable_signal(0);
+
+	dispc_pck_free_enable(0);
+
+	/* Reset SDI PLL */
+	REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
+}
+
+void dss_dump_clocks(struct seq_file *s)
+{
+	unsigned long dpll4_ck_rate;
+	unsigned long dpll4_m4_ck_rate;
+
+	dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+	dpll4_ck_rate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
+	dpll4_m4_ck_rate = clk_get_rate(dss.dpll4_m4_ck);
+
+	seq_printf(s, "- DSS -\n");
+
+	seq_printf(s, "dpll4_ck %lu\n", dpll4_ck_rate);
+
+	seq_printf(s, "dss1_alwon_fclk = %lu / %lu * 2 = %lu\n",
+			dpll4_ck_rate,
+			dpll4_ck_rate / dpll4_m4_ck_rate,
+			dss_clk_get_rate(DSS_CLK_FCK1));
+
+	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+}
+
+void dss_dump_regs(struct seq_file *s)
+{
+#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r))
+
+	dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+	DUMPREG(DSS_REVISION);
+	DUMPREG(DSS_SYSCONFIG);
+	DUMPREG(DSS_SYSSTATUS);
+	DUMPREG(DSS_IRQSTATUS);
+	DUMPREG(DSS_CONTROL);
+	DUMPREG(DSS_SDI_CONTROL);
+	DUMPREG(DSS_PLL_CONTROL);
+	DUMPREG(DSS_SDI_STATUS);
+
+	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+#undef DUMPREG
+}
+
+void dss_select_clk_source(bool dsi, bool dispc)
+{
+	u32 r;
+	r = dss_read_reg(DSS_CONTROL);
+	r = FLD_MOD(r, dsi, 1, 1);	/* DSI_CLK_SWITCH */
+	r = FLD_MOD(r, dispc, 0, 0);	/* DISPC_CLK_SWITCH */
+	dss_write_reg(DSS_CONTROL, r);
+}
+
+int dss_get_dsi_clk_source(void)
+{
+	return FLD_GET(dss_read_reg(DSS_CONTROL), 1, 1);
+}
+
+int dss_get_dispc_clk_source(void)
+{
+	return FLD_GET(dss_read_reg(DSS_CONTROL), 0, 0);
+}
+
+/* calculate clock rates using dividers in cinfo */
+int dss_calc_clock_rates(struct dss_clock_info *cinfo)
+{
+	unsigned long prate;
+
+	if (cinfo->fck_div > 16 || cinfo->fck_div == 0)
+		return -EINVAL;
+
+	prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
+
+	cinfo->fck = prate / cinfo->fck_div;
+
+	return 0;
+}
+
+int dss_set_clock_div(struct dss_clock_info *cinfo)
+{
+	unsigned long prate;
+	int r;
+
+	if (cpu_is_omap34xx()) {
+		prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
+		DSSDBG("dpll4_m4 = %ld\n", prate);
+
+		r = clk_set_rate(dss.dpll4_m4_ck, prate / cinfo->fck_div);
+		if (r)
+			return r;
+	}
+
+	DSSDBG("fck = %ld (%d)\n", cinfo->fck, cinfo->fck_div);
+
+	return 0;
+}
+
+int dss_get_clock_div(struct dss_clock_info *cinfo)
+{
+	cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK1);
+
+	if (cpu_is_omap34xx()) {
+		unsigned long prate;
+		prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
+		cinfo->fck_div = prate / (cinfo->fck / 2);
+	} else {
+		cinfo->fck_div = 0;
+	}
+
+	return 0;
+}
+
+unsigned long dss_get_dpll4_rate(void)
+{
+	if (cpu_is_omap34xx())
+		return clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
+	else
+		return 0;
+}
+
+int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
+		struct dss_clock_info *dss_cinfo,
+		struct dispc_clock_info *dispc_cinfo)
+{
+	unsigned long prate;
+	struct dss_clock_info best_dss;
+	struct dispc_clock_info best_dispc;
+
+	unsigned long fck;
+
+	u16 fck_div;
+
+	int match = 0;
+	int min_fck_per_pck;
+
+	prate = dss_get_dpll4_rate();
+
+	fck = dss_clk_get_rate(DSS_CLK_FCK1);
+	if (req_pck == dss.cache_req_pck &&
+			((cpu_is_omap34xx() && prate == dss.cache_prate) ||
+			 dss.cache_dss_cinfo.fck == fck)) {
+		DSSDBG("dispc clock info found from cache.\n");
+		*dss_cinfo = dss.cache_dss_cinfo;
+		*dispc_cinfo = dss.cache_dispc_cinfo;
+		return 0;
+	}
+
+	min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
+
+	if (min_fck_per_pck &&
+		req_pck * min_fck_per_pck > DISPC_MAX_FCK) {
+		DSSERR("Requested pixel clock not possible with the current "
+				"OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
+				"the constraint off.\n");
+		min_fck_per_pck = 0;
+	}
+
+retry:
+	memset(&best_dss, 0, sizeof(best_dss));
+	memset(&best_dispc, 0, sizeof(best_dispc));
+
+	if (cpu_is_omap24xx()) {
+		struct dispc_clock_info cur_dispc;
+		/* XXX can we change the clock on omap2? */
+		fck = dss_clk_get_rate(DSS_CLK_FCK1);
+		fck_div = 1;
+
+		dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc);
+		match = 1;
+
+		best_dss.fck = fck;
+		best_dss.fck_div = fck_div;
+
+		best_dispc = cur_dispc;
+
+		goto found;
+	} else if (cpu_is_omap34xx()) {
+		for (fck_div = 16; fck_div > 0; --fck_div) {
+			struct dispc_clock_info cur_dispc;
+
+			fck = prate / fck_div * 2;
+
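+			/* Illustrative, assuming the usual 864 MHz OMAP3
+			 * DPLL4 rate: fck_div = 10 gives
+			 * fck = 864 / 10 * 2 = 172.8 MHz, just under
+			 * DISPC_MAX_FCK; smaller divisors yield a too high
+			 * rate and are skipped by the check below. */
+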
+			if (fck > DISPC_MAX_FCK)
+				continue;
+
+			if (min_fck_per_pck &&
+					fck < req_pck * min_fck_per_pck)
+				continue;
+
+			match = 1;
+
+			dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc);
+
+			if (abs(cur_dispc.pck - req_pck) <
+					abs(best_dispc.pck - req_pck)) {
+
+				best_dss.fck = fck;
+				best_dss.fck_div = fck_div;
+
+				best_dispc = cur_dispc;
+
+				if (cur_dispc.pck == req_pck)
+					goto found;
+			}
+		}
+	} else {
+		BUG();
+	}
+
+found:
+	if (!match) {
+		if (min_fck_per_pck) {
+			DSSERR("Could not find suitable clock settings.\n"
+					"Turning FCK/PCK constraint off and"
+					"trying again.\n");
+			min_fck_per_pck = 0;
+			goto retry;
+		}
+
+		DSSERR("Could not find suitable clock settings.\n");
+
+		return -EINVAL;
+	}
+
+	if (dss_cinfo)
+		*dss_cinfo = best_dss;
+	if (dispc_cinfo)
+		*dispc_cinfo = best_dispc;
+
+	dss.cache_req_pck = req_pck;
+	dss.cache_prate = prate;
+	dss.cache_dss_cinfo = best_dss;
+	dss.cache_dispc_cinfo = best_dispc;
+
+	return 0;
+}
+
+
+
+static irqreturn_t dss_irq_handler_omap2(int irq, void *arg)
+{
+	dispc_irq_handler();
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t dss_irq_handler_omap3(int irq, void *arg)
+{
+	u32 irqstatus;
+
+	irqstatus = dss_read_reg(DSS_IRQSTATUS);
+
+	if (irqstatus & (1<<0))	/* DISPC_IRQ */
+		dispc_irq_handler();
+#ifdef CONFIG_OMAP2_DSS_DSI
+	if (irqstatus & (1<<1))	/* DSI_IRQ */
+		dsi_irq_handler();
+#endif
+
+	return IRQ_HANDLED;
+}
+
+static int _omap_dss_wait_reset(void)
+{
+	unsigned timeout = 1000;
+
+	while (REG_GET(DSS_SYSSTATUS, 0, 0) == 0) {
+		udelay(1);
+		if (!--timeout) {
+			DSSERR("soft reset failed\n");
+			return -ENODEV;
+		}
+	}
+
+	return 0;
+}
+
+static int _omap_dss_reset(void)
+{
+	/* Soft reset */
+	REG_FLD_MOD(DSS_SYSCONFIG, 1, 1, 1);
+	return _omap_dss_wait_reset();
+}
+
+void dss_set_venc_output(enum omap_dss_venc_type type)
+{
+	int l = 0;
+
+	if (type == OMAP_DSS_VENC_TYPE_COMPOSITE)
+		l = 0;
+	else if (type == OMAP_DSS_VENC_TYPE_SVIDEO)
+		l = 1;
+	else
+		BUG();
+
+	/* venc out selection. 0 = comp, 1 = svideo */
+	REG_FLD_MOD(DSS_CONTROL, l, 6, 6);
+}
+
+void dss_set_dac_pwrdn_bgz(bool enable)
+{
+	REG_FLD_MOD(DSS_CONTROL, enable, 5, 5);	/* DAC Power-Down Control */
+}
+
+int dss_init(bool skip_init)
+{
+	int r;
+	u32 rev;
+
+	dss.base = ioremap(DSS_BASE, DSS_SZ_REGS);
+	if (!dss.base) {
+		DSSERR("can't ioremap DSS\n");
+		r = -ENOMEM;
+		goto fail0;
+	}
+
+	if (!skip_init) {
+		/* Disable LCD and DIGIT output. This seems to fix the synclost
+		 * problem we get if the bootloader starts the DSS and the
+		 * kernel then resets it */
+		omap_writel(omap_readl(0x48050440) & ~0x3, 0x48050440);
+
+		/* We need to wait here a bit, otherwise we sometimes start to
+		 * get synclost errors, and after that only power cycle will
+		 * restore DSS functionality. I have no idea why this happens.
+		 * And we have to wait _before_ resetting the DSS, but after
+		 * enabling clocks.
+		 */
+		msleep(50);
+
+		_omap_dss_reset();
+	}
+
+	/* autoidle */
+	REG_FLD_MOD(DSS_SYSCONFIG, 1, 0, 0);
+
+	/* Select DPLL */
+	REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
+
+#ifdef CONFIG_OMAP2_DSS_VENC
+	REG_FLD_MOD(DSS_CONTROL, 1, 4, 4);	/* venc dac demen */
+	REG_FLD_MOD(DSS_CONTROL, 1, 3, 3);	/* venc clock 4x enable */
+	REG_FLD_MOD(DSS_CONTROL, 0, 2, 2);	/* venc clock mode = normal */
+#endif
+
+	r = request_irq(INT_24XX_DSS_IRQ,
+			cpu_is_omap24xx()
+			? dss_irq_handler_omap2
+			: dss_irq_handler_omap3,
+			0, "OMAP DSS", NULL);
+
+	if (r < 0) {
+		DSSERR("omap2 dss: request_irq failed\n");
+		goto fail1;
+	}
+
+	if (cpu_is_omap34xx()) {
+		dss.dpll4_m4_ck = clk_get(NULL, "dpll4_m4_ck");
+		if (IS_ERR(dss.dpll4_m4_ck)) {
+			DSSERR("Failed to get dpll4_m4_ck\n");
+			r = PTR_ERR(dss.dpll4_m4_ck);
+			goto fail2;
+		}
+	}
+
+	dss_save_context();
+
+	rev = dss_read_reg(DSS_REVISION);
+	printk(KERN_INFO "OMAP DSS rev %d.%d\n",
+			FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+
+	return 0;
+
+fail2:
+	free_irq(INT_24XX_DSS_IRQ, NULL);
+fail1:
+	iounmap(dss.base);
+fail0:
+	return r;
+}
+
+void dss_exit(void)
+{
+	if (cpu_is_omap34xx())
+		clk_put(dss.dpll4_m4_ck);
+
+	free_irq(INT_24XX_DSS_IRQ, NULL);
+
+	iounmap(dss.base);
+}
+
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
new file mode 100644
index 0000000..8da5ac4
--- /dev/null
+++ b/drivers/video/omap2/dss/dss.h
@@ -0,0 +1,370 @@
+/*
+ * linux/drivers/video/omap2/dss/dss.h
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAP2_DSS_H
+#define __OMAP2_DSS_H
+
+#ifdef CONFIG_OMAP2_DSS_DEBUG_SUPPORT
+#define DEBUG
+#endif
+
+#ifdef DEBUG
+extern unsigned int dss_debug;
+#ifdef DSS_SUBSYS_NAME
+#define DSSDBG(format, ...) \
+	if (dss_debug) \
+		printk(KERN_DEBUG "omapdss " DSS_SUBSYS_NAME ": " format, \
+		## __VA_ARGS__)
+#else
+#define DSSDBG(format, ...) \
+	if (dss_debug) \
+		printk(KERN_DEBUG "omapdss: " format, ## __VA_ARGS__)
+#endif
+
+#ifdef DSS_SUBSYS_NAME
+#define DSSDBGF(format, ...) \
+	if (dss_debug) \
+		printk(KERN_DEBUG "omapdss " DSS_SUBSYS_NAME \
+				": %s(" format ")\n", \
+				__func__, \
+				## __VA_ARGS__)
+#else
+#define DSSDBGF(format, ...) \
+	if (dss_debug) \
+		printk(KERN_DEBUG "omapdss: " \
+				": %s(" format ")\n", \
+				__func__, \
+				## __VA_ARGS__)
+#endif
+
+#else /* DEBUG */
+#define DSSDBG(format, ...)
+#define DSSDBGF(format, ...)
+#endif
+
+
+#ifdef DSS_SUBSYS_NAME
+#define DSSERR(format, ...) \
+	printk(KERN_ERR "omapdss " DSS_SUBSYS_NAME " error: " format, \
+	## __VA_ARGS__)
+#else
+#define DSSERR(format, ...) \
+	printk(KERN_ERR "omapdss error: " format, ## __VA_ARGS__)
+#endif
+
+#ifdef DSS_SUBSYS_NAME
+#define DSSINFO(format, ...) \
+	printk(KERN_INFO "omapdss " DSS_SUBSYS_NAME ": " format, \
+	## __VA_ARGS__)
+#else
+#define DSSINFO(format, ...) \
+	printk(KERN_INFO "omapdss: " format, ## __VA_ARGS__)
+#endif
+
+#ifdef DSS_SUBSYS_NAME
+#define DSSWARN(format, ...) \
+	printk(KERN_WARNING "omapdss " DSS_SUBSYS_NAME ": " format, \
+	## __VA_ARGS__)
+#else
+#define DSSWARN(format, ...) \
+	printk(KERN_WARNING "omapdss: " format, ## __VA_ARGS__)
+#endif
+
+/* OMAP TRM gives bitfields as start:end, where start is the higher bit
+   number. For example 7:0 */
+#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
+#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
+#define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end))
+#define FLD_MOD(orig, val, start, end) \
+	(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
+
+#define DISPC_MAX_FCK 173000000
+
+enum omap_burst_size {
+	OMAP_DSS_BURST_4x32 = 0,
+	OMAP_DSS_BURST_8x32 = 1,
+	OMAP_DSS_BURST_16x32 = 2,
+};
+
+enum omap_parallel_interface_mode {
+	OMAP_DSS_PARALLELMODE_BYPASS,		/* MIPI DPI */
+	OMAP_DSS_PARALLELMODE_RFBI,		/* MIPI DBI */
+	OMAP_DSS_PARALLELMODE_DSI,
+};
+
+enum dss_clock {
+	DSS_CLK_ICK	= 1 << 0,
+	DSS_CLK_FCK1	= 1 << 1,
+	DSS_CLK_FCK2	= 1 << 2,
+	DSS_CLK_54M	= 1 << 3,
+	DSS_CLK_96M	= 1 << 4,
+};
+
+struct dss_clock_info {
+	/* rates that we get with dividers below */
+	unsigned long fck;
+
+	/* dividers */
+	u16 fck_div;
+};
+
+struct dispc_clock_info {
+	/* rates that we get with dividers below */
+	unsigned long lck;
+	unsigned long pck;
+
+	/* dividers */
+	u16 lck_div;
+	u16 pck_div;
+};
+
+struct dsi_clock_info {
+	/* rates that we get with dividers below */
+	unsigned long fint;
+	unsigned long clkin4ddr;
+	unsigned long clkin;
+	unsigned long dsi1_pll_fclk;
+	unsigned long dsi2_pll_fclk;
+
+	unsigned long lp_clk;
+
+	/* dividers */
+	u16 regn;
+	u16 regm;
+	u16 regm3;
+	u16 regm4;
+
+	u16 lp_clk_div;
+
+	u8 highfreq;
+	bool use_dss2_fck;
+};
+
+struct seq_file;
+struct platform_device;
+
+/* core */
+void dss_clk_enable(enum dss_clock clks);
+void dss_clk_disable(enum dss_clock clks);
+unsigned long dss_clk_get_rate(enum dss_clock clk);
+int dss_need_ctx_restore(void);
+void dss_dump_clocks(struct seq_file *s);
+struct bus_type *dss_get_bus(void);
+
+/* display */
+int dss_suspend_all_devices(void);
+int dss_resume_all_devices(void);
+void dss_disable_all_devices(void);
+
+void dss_init_device(struct platform_device *pdev,
+		struct omap_dss_device *dssdev);
+void dss_uninit_device(struct platform_device *pdev,
+		struct omap_dss_device *dssdev);
+bool dss_use_replication(struct omap_dss_device *dssdev,
+		enum omap_color_mode mode);
+void default_get_overlay_fifo_thresholds(enum omap_plane plane,
+		u32 fifo_size, enum omap_burst_size *burst_size,
+		u32 *fifo_low, u32 *fifo_high);
+
+/* manager */
+int dss_init_overlay_managers(struct platform_device *pdev);
+void dss_uninit_overlay_managers(struct platform_device *pdev);
+int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl);
+void dss_setup_partial_planes(struct omap_dss_device *dssdev,
+				u16 *x, u16 *y, u16 *w, u16 *h);
+void dss_start_update(struct omap_dss_device *dssdev);
+
+/* overlay */
+void dss_init_overlays(struct platform_device *pdev);
+void dss_uninit_overlays(struct platform_device *pdev);
+int dss_check_overlay(struct omap_overlay *ovl, struct omap_dss_device *dssdev);
+void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr);
+#ifdef L4_EXAMPLE
+void dss_overlay_setup_l4_manager(struct omap_overlay_manager *mgr);
+#endif
+void dss_recheck_connections(struct omap_dss_device *dssdev, bool force);
+
+/* DSS */
+int dss_init(bool skip_init);
+void dss_exit(void);
+
+void dss_save_context(void);
+void dss_restore_context(void);
+
+void dss_dump_regs(struct seq_file *s);
+
+void dss_sdi_init(u8 datapairs);
+int dss_sdi_enable(void);
+void dss_sdi_disable(void);
+
+void dss_select_clk_source(bool dsi, bool dispc);
+int dss_get_dsi_clk_source(void);
+int dss_get_dispc_clk_source(void);
+void dss_set_venc_output(enum omap_dss_venc_type type);
+void dss_set_dac_pwrdn_bgz(bool enable);
+
+unsigned long dss_get_dpll4_rate(void);
+int dss_calc_clock_rates(struct dss_clock_info *cinfo);
+int dss_set_clock_div(struct dss_clock_info *cinfo);
+int dss_get_clock_div(struct dss_clock_info *cinfo);
+int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
+		struct dss_clock_info *dss_cinfo,
+		struct dispc_clock_info *dispc_cinfo);
+
+/* SDI */
+int sdi_init(bool skip_init);
+void sdi_exit(void);
+int sdi_init_display(struct omap_dss_device *display);
+
+/* DSI */
+int dsi_init(struct platform_device *pdev);
+void dsi_exit(void);
+
+void dsi_dump_clocks(struct seq_file *s);
+void dsi_dump_regs(struct seq_file *s);
+
+void dsi_save_context(void);
+void dsi_restore_context(void);
+
+int dsi_init_display(struct omap_dss_device *display);
+void dsi_irq_handler(void);
+unsigned long dsi_get_dsi1_pll_rate(void);
+int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo);
+int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck,
+		struct dsi_clock_info *cinfo,
+		struct dispc_clock_info *dispc_cinfo);
+int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk,
+		bool enable_hsdiv);
+void dsi_pll_uninit(void);
+void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
+		u32 fifo_size, enum omap_burst_size *burst_size,
+		u32 *fifo_low, u32 *fifo_high);
+
+/* DPI */
+int dpi_init(void);
+void dpi_exit(void);
+int dpi_init_display(struct omap_dss_device *dssdev);
+
+/* DISPC */
+int dispc_init(void);
+void dispc_exit(void);
+void dispc_dump_clocks(struct seq_file *s);
+void dispc_dump_regs(struct seq_file *s);
+void dispc_irq_handler(void);
+void dispc_fake_vsync_irq(void);
+
+void dispc_save_context(void);
+void dispc_restore_context(void);
+
+void dispc_enable_sidle(void);
+void dispc_disable_sidle(void);
+
+void dispc_lcd_enable_signal_polarity(bool act_high);
+void dispc_lcd_enable_signal(bool enable);
+void dispc_pck_free_enable(bool enable);
+void dispc_enable_fifohandcheck(bool enable);
+
+void dispc_set_lcd_size(u16 width, u16 height);
+void dispc_set_digit_size(u16 width, u16 height);
+u32 dispc_get_plane_fifo_size(enum omap_plane plane);
+void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high);
+void dispc_enable_fifomerge(bool enable);
+void dispc_set_burst_size(enum omap_plane plane,
+		enum omap_burst_size burst_size);
+
+void dispc_set_plane_ba0(enum omap_plane plane, u32 paddr);
+void dispc_set_plane_ba1(enum omap_plane plane, u32 paddr);
+void dispc_set_plane_pos(enum omap_plane plane, u16 x, u16 y);
+void dispc_set_plane_size(enum omap_plane plane, u16 width, u16 height);
+void dispc_set_channel_out(enum omap_plane plane,
+		enum omap_channel channel_out);
+
+int dispc_setup_plane(enum omap_plane plane,
+		      u32 paddr, u16 screen_width,
+		      u16 pos_x, u16 pos_y,
+		      u16 width, u16 height,
+		      u16 out_width, u16 out_height,
+		      enum omap_color_mode color_mode,
+		      bool ilace,
+		      enum omap_dss_rotation_type rotation_type,
+		      u8 rotation, bool mirror,
+		      u8 global_alpha);
+
+bool dispc_go_busy(enum omap_channel channel);
+void dispc_go(enum omap_channel channel);
+void dispc_enable_lcd_out(bool enable);
+void dispc_enable_digit_out(bool enable);
+int dispc_enable_plane(enum omap_plane plane, bool enable);
+void dispc_enable_replication(enum omap_plane plane, bool enable);
+
+void dispc_set_parallel_interface_mode(enum omap_parallel_interface_mode mode);
+void dispc_set_tft_data_lines(u8 data_lines);
+void dispc_set_lcd_display_type(enum omap_lcd_display_type type);
+void dispc_set_loadmode(enum omap_dss_load_mode mode);
+
+void dispc_set_default_color(enum omap_channel channel, u32 color);
+u32 dispc_get_default_color(enum omap_channel channel);
+void dispc_set_trans_key(enum omap_channel ch,
+		enum omap_dss_trans_key_type type,
+		u32 trans_key);
+void dispc_get_trans_key(enum omap_channel ch,
+		enum omap_dss_trans_key_type *type,
+		u32 *trans_key);
+void dispc_enable_trans_key(enum omap_channel ch, bool enable);
+void dispc_enable_alpha_blending(enum omap_channel ch, bool enable);
+bool dispc_trans_key_enabled(enum omap_channel ch);
+bool dispc_alpha_blending_enabled(enum omap_channel ch);
+
+bool dispc_lcd_timings_ok(struct omap_video_timings *timings);
+void dispc_set_lcd_timings(struct omap_video_timings *timings);
+unsigned long dispc_fclk_rate(void);
+unsigned long dispc_lclk_rate(void);
+unsigned long dispc_pclk_rate(void);
+void dispc_set_pol_freq(enum omap_panel_config config, u8 acbi, u8 acb);
+void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck,
+		struct dispc_clock_info *cinfo);
+int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
+		struct dispc_clock_info *cinfo);
+int dispc_set_clock_div(struct dispc_clock_info *cinfo);
+int dispc_get_clock_div(struct dispc_clock_info *cinfo);
+
+
+/* VENC */
+int venc_init(struct platform_device *pdev);
+void venc_exit(void);
+void venc_dump_regs(struct seq_file *s);
+int venc_init_display(struct omap_dss_device *display);
+
+/* RFBI */
+int rfbi_init(void);
+void rfbi_exit(void);
+void rfbi_dump_regs(struct seq_file *s);
+
+int rfbi_configure(int rfbi_module, int bpp, int lines);
+void rfbi_enable_rfbi(bool enable);
+void rfbi_transfer_area(u16 width, u16 height,
+			void (*callback)(void *data), void *data);
+void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t);
+unsigned long rfbi_get_max_tx_rate(void);
+int rfbi_init_display(struct omap_dss_device *display);
+
+#endif
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
new file mode 100644
index 0000000..27d9c46
--- /dev/null
+++ b/drivers/video/omap2/dss/manager.c
@@ -0,0 +1,1487 @@
+/*
+ * linux/drivers/video/omap2/dss/manager.c
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "MANAGER"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/jiffies.h>
+
+#include <plat/display.h>
+#include <plat/cpu.h>
+
+#include "dss.h"
+
+static int num_managers;
+static struct list_head manager_list;
+
+static ssize_t manager_name_show(struct omap_overlay_manager *mgr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", mgr->name);
+}
+
+static ssize_t manager_display_show(struct omap_overlay_manager *mgr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			mgr->device ? mgr->device->name : "<none>");
+}
+
+static ssize_t manager_display_store(struct omap_overlay_manager *mgr,
+		const char *buf, size_t size)
+{
+	int r = 0;
+	size_t len = size;
+	struct omap_dss_device *dssdev = NULL;
+
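+	/* GCC nested function, used as the match callback for the device
+	 * lookup below */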
+	int match(struct omap_dss_device *dssdev, void *data)
+	{
+		const char *str = data;
+		return sysfs_streq(dssdev->name, str);
+	}
+
+	if (buf[size-1] == '\n')
+		--len;
+
+	if (len > 0)
+		dssdev = omap_dss_find_device((void *)buf, match);
+
+	if (len > 0 && dssdev == NULL)
+		return -EINVAL;
+
+	if (dssdev)
+		DSSDBG("display %s found\n", dssdev->name);
+
+	if (mgr->device) {
+		r = mgr->unset_device(mgr);
+		if (r) {
+			DSSERR("failed to unset display\n");
+			goto put_device;
+		}
+	}
+
+	if (dssdev) {
+		r = mgr->set_device(mgr, dssdev);
+		if (r) {
+			DSSERR("failed to set manager\n");
+			goto put_device;
+		}
+
+		r = mgr->apply(mgr);
+		if (r) {
+			DSSERR("failed to apply dispc config\n");
+			goto put_device;
+		}
+	}
+
+put_device:
+	if (dssdev)
+		omap_dss_put_device(dssdev);
+
+	return r ? r : size;
+}
+
+static ssize_t manager_default_color_show(struct omap_overlay_manager *mgr,
+					  char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", mgr->info.default_color);
+}
+
+static ssize_t manager_default_color_store(struct omap_overlay_manager *mgr,
+					   const char *buf, size_t size)
+{
+	struct omap_overlay_manager_info info;
+	u32 color;
+	int r;
+
+	if (sscanf(buf, "%u", &color) != 1)
+		return -EINVAL;
+
+	mgr->get_manager_info(mgr, &info);
+
+	info.default_color = color;
+
+	r = mgr->set_manager_info(mgr, &info);
+	if (r)
+		return r;
+
+	r = mgr->apply(mgr);
+	if (r)
+		return r;
+
+	return size;
+}
+
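+/* Indexed by enum omap_dss_trans_key_type */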
+static const char *trans_key_type_str[] = {
+	"gfx-destination",
+	"video-source",
+};
+
+static ssize_t manager_trans_key_type_show(struct omap_overlay_manager *mgr,
+					   char *buf)
+{
+	enum omap_dss_trans_key_type key_type;
+
+	key_type = mgr->info.trans_key_type;
+	BUG_ON(key_type >= ARRAY_SIZE(trans_key_type_str));
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", trans_key_type_str[key_type]);
+}
+
+static ssize_t manager_trans_key_type_store(struct omap_overlay_manager *mgr,
+					    const char *buf, size_t size)
+{
+	enum omap_dss_trans_key_type key_type;
+	struct omap_overlay_manager_info info;
+	int r;
+
+	for (key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+			key_type < ARRAY_SIZE(trans_key_type_str); key_type++) {
+		if (sysfs_streq(buf, trans_key_type_str[key_type]))
+			break;
+	}
+
+	if (key_type == ARRAY_SIZE(trans_key_type_str))
+		return -EINVAL;
+
+	mgr->get_manager_info(mgr, &info);
+
+	info.trans_key_type = key_type;
+
+	r = mgr->set_manager_info(mgr, &info);
+	if (r)
+		return r;
+
+	r = mgr->apply(mgr);
+	if (r)
+		return r;
+
+	return size;
+}
+
+static ssize_t manager_trans_key_value_show(struct omap_overlay_manager *mgr,
+					    char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", mgr->info.trans_key);
+}
+
+static ssize_t manager_trans_key_value_store(struct omap_overlay_manager *mgr,
+					     const char *buf, size_t size)
+{
+	struct omap_overlay_manager_info info;
+	u32 key_value;
+	int r;
+
+	if (sscanf(buf, "%u", &key_value) != 1)
+		return -EINVAL;
+
+	mgr->get_manager_info(mgr, &info);
+
+	info.trans_key = key_value;
+
+	r = mgr->set_manager_info(mgr, &info);
+	if (r)
+		return r;
+
+	r = mgr->apply(mgr);
+	if (r)
+		return r;
+
+	return size;
+}
+
+static ssize_t manager_trans_key_enabled_show(struct omap_overlay_manager *mgr,
+					      char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", mgr->info.trans_enabled);
+}
+
+static ssize_t manager_trans_key_enabled_store(struct omap_overlay_manager *mgr,
+					       const char *buf, size_t size)
+{
+	struct omap_overlay_manager_info info;
+	int enable;
+	int r;
+
+	if (sscanf(buf, "%d", &enable) != 1)
+		return -EINVAL;
+
+	mgr->get_manager_info(mgr, &info);
+
+	info.trans_enabled = enable ? true : false;
+
+	r = mgr->set_manager_info(mgr, &info);
+	if (r)
+		return r;
+
+	r = mgr->apply(mgr);
+	if (r)
+		return r;
+
+	return size;
+}
+
+static ssize_t manager_alpha_blending_enabled_show(
+		struct omap_overlay_manager *mgr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", mgr->info.alpha_enabled);
+}
+
+static ssize_t manager_alpha_blending_enabled_store(
+		struct omap_overlay_manager *mgr,
+		const char *buf, size_t size)
+{
+	struct omap_overlay_manager_info info;
+	int enable;
+	int r;
+
+	if (sscanf(buf, "%d", &enable) != 1)
+		return -EINVAL;
+
+	mgr->get_manager_info(mgr, &info);
+
+	info.alpha_enabled = enable ? true : false;
+
+	r = mgr->set_manager_info(mgr, &info);
+	if (r)
+		return r;
+
+	r = mgr->apply(mgr);
+	if (r)
+		return r;
+
+	return size;
+}
+
+struct manager_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct omap_overlay_manager *, char *);
+	ssize_t	(*store)(struct omap_overlay_manager *, const char *, size_t);
+};
+
+#define MANAGER_ATTR(_name, _mode, _show, _store) \
+	struct manager_attribute manager_attr_##_name = \
+	__ATTR(_name, _mode, _show, _store)
+
+static MANAGER_ATTR(name, S_IRUGO, manager_name_show, NULL);
+static MANAGER_ATTR(display, S_IRUGO|S_IWUSR,
+		manager_display_show, manager_display_store);
+static MANAGER_ATTR(default_color, S_IRUGO|S_IWUSR,
+		manager_default_color_show, manager_default_color_store);
+static MANAGER_ATTR(trans_key_type, S_IRUGO|S_IWUSR,
+		manager_trans_key_type_show, manager_trans_key_type_store);
+static MANAGER_ATTR(trans_key_value, S_IRUGO|S_IWUSR,
+		manager_trans_key_value_show, manager_trans_key_value_store);
+static MANAGER_ATTR(trans_key_enabled, S_IRUGO|S_IWUSR,
+		manager_trans_key_enabled_show,
+		manager_trans_key_enabled_store);
+static MANAGER_ATTR(alpha_blending_enabled, S_IRUGO|S_IWUSR,
+		manager_alpha_blending_enabled_show,
+		manager_alpha_blending_enabled_store);
+
+
+static struct attribute *manager_sysfs_attrs[] = {
+	&manager_attr_name.attr,
+	&manager_attr_display.attr,
+	&manager_attr_default_color.attr,
+	&manager_attr_trans_key_type.attr,
+	&manager_attr_trans_key_value.attr,
+	&manager_attr_trans_key_enabled.attr,
+	&manager_attr_alpha_blending_enabled.attr,
+	NULL
+};
+
+static ssize_t manager_attr_show(struct kobject *kobj, struct attribute *attr,
+		char *buf)
+{
+	struct omap_overlay_manager *manager;
+	struct manager_attribute *manager_attr;
+
+	manager = container_of(kobj, struct omap_overlay_manager, kobj);
+	manager_attr = container_of(attr, struct manager_attribute, attr);
+
+	if (!manager_attr->show)
+		return -ENOENT;
+
+	return manager_attr->show(manager, buf);
+}
+
+static ssize_t manager_attr_store(struct kobject *kobj, struct attribute *attr,
+		const char *buf, size_t size)
+{
+	struct omap_overlay_manager *manager;
+	struct manager_attribute *manager_attr;
+
+	manager = container_of(kobj, struct omap_overlay_manager, kobj);
+	manager_attr = container_of(attr, struct manager_attribute, attr);
+
+	if (!manager_attr->store)
+		return -ENOENT;
+
+	return manager_attr->store(manager, buf, size);
+}
+
+static struct sysfs_ops manager_sysfs_ops = {
+	.show = manager_attr_show,
+	.store = manager_attr_store,
+};
+
+static struct kobj_type manager_ktype = {
+	.sysfs_ops = &manager_sysfs_ops,
+	.default_attrs = manager_sysfs_attrs,
+};
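+
+/*
+ * The attributes above appear in sysfs under the omapdss platform device,
+ * e.g. /sys/devices/platform/omapdss/manager0/ (the exact path depends on
+ * the platform device). An illustrative configuration sequence from
+ * userspace, assuming the board defines a display named "lcd":
+ *   # echo "lcd" > manager0/display
+ *   # echo "gfx-destination" > manager0/trans_key_type
+ *   # echo 1 > manager0/trans_key_enabled
+ */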
+
+/*
+ * We have 4 levels of cache for the dispc settings. First two are in SW and
+ * the latter two in HW.
+ *
+ * +--------------------+
+ * |overlay/manager_info|
+ * +--------------------+
+ *          v
+ *        apply()
+ *          v
+ * +--------------------+
+ * |     dss_cache      |
+ * +--------------------+
+ *          v
+ *      configure()
+ *          v
+ * +--------------------+
+ * |  shadow registers  |
+ * +--------------------+
+ *          v
+ * VFP or lcd/digit_enable
+ *          v
+ * +--------------------+
+ * |      registers     |
+ * +--------------------+
+ */
+
+struct overlay_cache_data {
+	/* If true, cache changed, but not written to shadow registers. Set
+	 * in apply(), cleared when registers written. */
+	bool dirty;
+	/* If true, shadow registers contain changed values not yet in real
+	 * registers. Set when writing to shadow registers, cleared at
+	 * VSYNC/EVSYNC */
+	bool shadow_dirty;
+
+	bool enabled;
+
+	u32 paddr;
+	void __iomem *vaddr;
+	u16 screen_width;
+	u16 width;
+	u16 height;
+	enum omap_color_mode color_mode;
+	u8 rotation;
+	enum omap_dss_rotation_type rotation_type;
+	bool mirror;
+
+	u16 pos_x;
+	u16 pos_y;
+	u16 out_width;	/* if 0, out_width == width */
+	u16 out_height;	/* if 0, out_height == height */
+	u8 global_alpha;
+
+	enum omap_channel channel;
+	bool replication;
+	bool ilace;
+
+	enum omap_burst_size burst_size;
+	u32 fifo_low;
+	u32 fifo_high;
+
+	bool manual_update;
+};
+
+struct manager_cache_data {
+	/* If true, cache changed, but not written to shadow registers. Set
+	 * in apply(), cleared when registers written. */
+	bool dirty;
+	/* If true, shadow registers contain changed values not yet in real
+	 * registers. Set when writing to shadow registers, cleared at
+	 * VSYNC/EVSYNC */
+	bool shadow_dirty;
+
+	u32 default_color;
+
+	enum omap_dss_trans_key_type trans_key_type;
+	u32 trans_key;
+	bool trans_enabled;
+
+	bool alpha_enabled;
+
+	bool manual_upd_display;
+	bool manual_update;
+	bool do_manual_update;
+
+	/* manual update region */
+	u16 x, y, w, h;
+};
+
+static struct {
+	spinlock_t lock;
+	struct overlay_cache_data overlay_cache[3];
+	struct manager_cache_data manager_cache[2];
+
+	bool irq_enabled;
+} dss_cache;
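+
+/* dss_cache.lock protects both caches. It is also taken from
+ * dss_apply_irq_handler() in interrupt context, so process context users
+ * must take it with spin_lock_irqsave(). */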
+
+
+
+static int omap_dss_set_device(struct omap_overlay_manager *mgr,
+		struct omap_dss_device *dssdev)
+{
+	int i;
+	int r;
+
+	if (dssdev->manager) {
+		DSSERR("display '%s' already has a manager '%s'\n",
+			       dssdev->name, dssdev->manager->name);
+		return -EINVAL;
+	}
+
+	if ((mgr->supported_displays & dssdev->type) == 0) {
+		DSSERR("manager '%s' does not support display '%s'\n",
+			       mgr->name, dssdev->name);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < mgr->num_overlays; i++) {
+		struct omap_overlay *ovl = mgr->overlays[i];
+
+		if (ovl->manager != mgr || !ovl->info.enabled)
+			continue;
+
+		r = dss_check_overlay(ovl, dssdev);
+		if (r)
+			return r;
+	}
+
+	dssdev->manager = mgr;
+	mgr->device = dssdev;
+	mgr->device_changed = true;
+
+	return 0;
+}
+
+static int omap_dss_unset_device(struct omap_overlay_manager *mgr)
+{
+	if (!mgr->device) {
+		DSSERR("failed to unset display, display not set.\n");
+		return -EINVAL;
+	}
+
+	mgr->device->manager = NULL;
+	mgr->device = NULL;
+	mgr->device_changed = true;
+
+	return 0;
+}
+
+static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
+{
+	unsigned long timeout = msecs_to_jiffies(500);
+	struct manager_cache_data *mc;
+	enum omap_channel channel;
+	u32 irq;
+	int r;
+	int i;
+
+	if (!mgr->device)
+		return 0;
+
+	if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC) {
+		irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
+		channel = OMAP_DSS_CHANNEL_DIGIT;
+	} else {
+		if (mgr->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
+			enum omap_dss_update_mode mode;
+			mode = mgr->device->get_update_mode(mgr->device);
+			if (mode != OMAP_DSS_UPDATE_AUTO)
+				return 0;
+
+			irq = DISPC_IRQ_FRAMEDONE;
+		} else {
+			irq = DISPC_IRQ_VSYNC;
+		}
+		channel = OMAP_DSS_CHANNEL_LCD;
+	}
+
+	mc = &dss_cache.manager_cache[mgr->id];
+	i = 0;
+	while (1) {
+		unsigned long flags;
+		bool shadow_dirty, dirty;
+
+		spin_lock_irqsave(&dss_cache.lock, flags);
+		dirty = mc->dirty;
+		shadow_dirty = mc->shadow_dirty;
+		spin_unlock_irqrestore(&dss_cache.lock, flags);
+
+		if (!dirty && !shadow_dirty) {
+			r = 0;
+			break;
+		}
+
+		/* 4 iterations is the worst case:
+		 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
+		 * 2 - first VSYNC, dirty = true
+		 * 3 - dirty = false, shadow_dirty = true
+		 * 4 - shadow_dirty = false */
+		if (i++ == 3) {
+			DSSERR("mgr(%d)->wait_for_go() not finishing\n",
+					mgr->id);
+			r = 0;
+			break;
+		}
+
+		r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
+		if (r == -ERESTARTSYS)
+			break;
+
+		if (r) {
+			DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id);
+			break;
+		}
+	}
+
+	return r;
+}
+
+int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
+{
+	unsigned long timeout = msecs_to_jiffies(500);
+	enum omap_channel channel;
+	struct overlay_cache_data *oc;
+	struct omap_dss_device *dssdev;
+	u32 irq;
+	int r;
+	int i;
+
+	if (!ovl->manager || !ovl->manager->device)
+		return 0;
+
+	dssdev = ovl->manager->device;
+
+	if (dssdev->type == OMAP_DISPLAY_TYPE_VENC) {
+		irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
+		channel = OMAP_DSS_CHANNEL_DIGIT;
+	} else {
+		if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
+			enum omap_dss_update_mode mode;
+			mode = dssdev->get_update_mode(dssdev);
+			if (mode != OMAP_DSS_UPDATE_AUTO)
+				return 0;
+
+			irq = DISPC_IRQ_FRAMEDONE;
+		} else {
+			irq = DISPC_IRQ_VSYNC;
+		}
+		channel = OMAP_DSS_CHANNEL_LCD;
+	}
+
+	oc = &dss_cache.overlay_cache[ovl->id];
+	i = 0;
+	while (1) {
+		unsigned long flags;
+		bool shadow_dirty, dirty;
+
+		spin_lock_irqsave(&dss_cache.lock, flags);
+		dirty = oc->dirty;
+		shadow_dirty = oc->shadow_dirty;
+		spin_unlock_irqrestore(&dss_cache.lock, flags);
+
+		if (!dirty && !shadow_dirty) {
+			r = 0;
+			break;
+		}
+
+		/* 4 iterations is the worst case:
+		 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
+		 * 2 - first VSYNC, dirty = true
+		 * 3 - dirty = false, shadow_dirty = true
+		 * 4 - shadow_dirty = false */
+		if (i++ == 3) {
+			DSSERR("ovl(%d)->wait_for_go() not finishing\n",
+					ovl->id);
+			r = 0;
+			break;
+		}
+
+		r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
+		if (r == -ERESTARTSYS)
+			break;
+
+		if (r) {
+			DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id);
+			break;
+		}
+	}
+
+	return r;
+}
+
+static int overlay_enabled(struct omap_overlay *ovl)
+{
+	return ovl->info.enabled && ovl->manager && ovl->manager->device;
+}
+
+/* Is rect1 a subset of rect2? */
+static bool rectangle_subset(int x1, int y1, int w1, int h1,
+		int x2, int y2, int w2, int h2)
+{
+	if (x1 < x2 || y1 < y2)
+		return false;
+
+	if (x1 + w1 > x2 + w2)
+		return false;
+
+	if (y1 + h1 > y2 + h2)
+		return false;
+
+	return true;
+}
+
+/* Do rect1 and rect2 overlap? */
+static bool rectangle_intersects(int x1, int y1, int w1, int h1,
+		int x2, int y2, int w2, int h2)
+{
+	if (x1 >= x2 + w2)
+		return false;
+
+	if (x2 >= x1 + w1)
+		return false;
+
+	if (y1 >= y2 + h2)
+		return false;
+
+	if (y2 >= y1 + h1)
+		return false;
+
+	return true;
+}
+
+static bool dispc_is_overlay_scaled(struct overlay_cache_data *oc)
+{
+	if (oc->out_width != 0 && oc->width != oc->out_width)
+		return true;
+
+	if (oc->out_height != 0 && oc->height != oc->out_height)
+		return true;
+
+	return false;
+}
+
+static int configure_overlay(enum omap_plane plane)
+{
+	struct overlay_cache_data *c;
+	struct manager_cache_data *mc;
+	u16 outw, outh;
+	u16 x, y, w, h;
+	u32 paddr;
+	int r;
+
+	DSSDBGF("%d", plane);
+
+	c = &dss_cache.overlay_cache[plane];
+
+	if (!c->enabled) {
+		dispc_enable_plane(plane, 0);
+		return 0;
+	}
+
+	mc = &dss_cache.manager_cache[c->channel];
+
+	x = c->pos_x;
+	y = c->pos_y;
+	w = c->width;
+	h = c->height;
+	outw = c->out_width == 0 ? c->width : c->out_width;
+	outh = c->out_height == 0 ? c->height : c->out_height;
+	paddr = c->paddr;
+
+	if (c->manual_update && mc->do_manual_update) {
+		unsigned bpp;
+		/* If the overlay is outside the update region, disable it */
+		if (!rectangle_intersects(mc->x, mc->y, mc->w, mc->h,
+					x, y, outw, outh)) {
+			dispc_enable_plane(plane, 0);
+			return 0;
+		}
+
+		switch (c->color_mode) {
+		case OMAP_DSS_COLOR_RGB16:
+		case OMAP_DSS_COLOR_ARGB16:
+		case OMAP_DSS_COLOR_YUV2:
+		case OMAP_DSS_COLOR_UYVY:
+			bpp = 16;
+			break;
+
+		case OMAP_DSS_COLOR_RGB24P:
+			bpp = 24;
+			break;
+
+		case OMAP_DSS_COLOR_RGB24U:
+		case OMAP_DSS_COLOR_ARGB32:
+		case OMAP_DSS_COLOR_RGBA32:
+		case OMAP_DSS_COLOR_RGBX32:
+			bpp = 32;
+			break;
+
+		default:
+			BUG();
+		}
+
+		if (dispc_is_overlay_scaled(c)) {
+			/* If the overlay is scaled, the update area has
+			 * already been enlarged to cover the whole overlay. We
+			 * only need to adjust x/y here */
+			x = c->pos_x - mc->x;
+			y = c->pos_y - mc->y;
+		} else {
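+			/* Unscaled: clip the source window to the update
+			 * region and advance paddr past the clipped pixels.
+			 * E.g. clipping 10 px off the left of an RGB16
+			 * (16 bpp) overlay advances paddr by
+			 * 10 * 16 / 8 = 20 bytes. */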
+			if (mc->x > c->pos_x) {
+				x = 0;
+				w -= (mc->x - c->pos_x);
+				paddr += (mc->x - c->pos_x) * bpp / 8;
+			} else {
+				x = c->pos_x - mc->x;
+			}
+
+			if (mc->y > c->pos_y) {
+				y = 0;
+				h -= (mc->y - c->pos_y);
+				paddr += (mc->y - c->pos_y) * c->screen_width *
+					bpp / 8;
+			} else {
+				y = c->pos_y - mc->y;
+			}
+
+			if (mc->w < (x+w))
+				w -= (x+w) - (mc->w);
+
+			if (mc->h < (y+h))
+				h -= (y+h) - (mc->h);
+
+			outw = w;
+			outh = h;
+		}
+	}
+
+	r = dispc_setup_plane(plane,
+			paddr,
+			c->screen_width,
+			x, y,
+			w, h,
+			outw, outh,
+			c->color_mode,
+			c->ilace,
+			c->rotation_type,
+			c->rotation,
+			c->mirror,
+			c->global_alpha);
+
+	if (r) {
+		/* this shouldn't happen */
+		DSSERR("dispc_setup_plane failed for ovl %d\n", plane);
+		dispc_enable_plane(plane, 0);
+		return r;
+	}
+
+	dispc_enable_replication(plane, c->replication);
+
+	dispc_set_burst_size(plane, c->burst_size);
+	dispc_setup_plane_fifo(plane, c->fifo_low, c->fifo_high);
+
+	dispc_enable_plane(plane, 1);
+
+	return 0;
+}
+
+static void configure_manager(enum omap_channel channel)
+{
+	struct manager_cache_data *c;
+
+	DSSDBGF("%d", channel);
+
+	c = &dss_cache.manager_cache[channel];
+
+	dispc_set_trans_key(channel, c->trans_key_type, c->trans_key);
+	dispc_enable_trans_key(channel, c->trans_enabled);
+	dispc_enable_alpha_blending(channel, c->alpha_enabled);
+}
+
+/* configure_dispc() tries to write values from the cache to the shadow
+ * registers. It writes only to those managers/overlays that are not busy.
+ * Returns 0 if everything could be written to the shadow registers, and 1
+ * if some entries could not be written because a manager was busy. */
+static int configure_dispc(void)
+{
+	struct overlay_cache_data *oc;
+	struct manager_cache_data *mc;
+	const int num_ovls = ARRAY_SIZE(dss_cache.overlay_cache);
+	const int num_mgrs = ARRAY_SIZE(dss_cache.manager_cache);
+	int i;
+	int r;
+	bool mgr_busy[2];
+	bool mgr_go[2];
+	bool busy;
+
+	r = 0;
+	busy = false;
+
+	mgr_busy[0] = dispc_go_busy(0);
+	mgr_busy[1] = dispc_go_busy(1);
+	mgr_go[0] = false;
+	mgr_go[1] = false;
+
+	/* Commit overlay settings */
+	for (i = 0; i < num_ovls; ++i) {
+		oc = &dss_cache.overlay_cache[i];
+		mc = &dss_cache.manager_cache[oc->channel];
+
+		if (!oc->dirty)
+			continue;
+
+		if (oc->manual_update && !mc->do_manual_update)
+			continue;
+
+		if (mgr_busy[oc->channel]) {
+			busy = true;
+			continue;
+		}
+
+		r = configure_overlay(i);
+		if (r)
+			DSSERR("configure_overlay %d failed\n", i);
+
+		oc->dirty = false;
+		oc->shadow_dirty = true;
+		mgr_go[oc->channel] = true;
+	}
+
+	/* Commit manager settings */
+	for (i = 0; i < num_mgrs; ++i) {
+		mc = &dss_cache.manager_cache[i];
+
+		if (!mc->dirty)
+			continue;
+
+		if (mc->manual_update && !mc->do_manual_update)
+			continue;
+
+		if (mgr_busy[i]) {
+			busy = true;
+			continue;
+		}
+
+		configure_manager(i);
+		mc->dirty = false;
+		mc->shadow_dirty = true;
+		mgr_go[i] = true;
+	}
+
+	/* set GO */
+	for (i = 0; i < num_mgrs; ++i) {
+		mc = &dss_cache.manager_cache[i];
+
+		if (!mgr_go[i])
+			continue;
+
+		/* We don't need GO with a manual update display. The LCD
+		 * interface is always turned off after the frame, and the new
+		 * settings are taken into use at the next update */
+		if (!mc->manual_upd_display)
+			dispc_go(i);
+	}
+
+	if (busy)
+		r = 1;
+	else
+		r = 0;
+
+	return r;
+}
+
+/* Configure dispc for partial update. Return possibly modified update
+ * area */
+void dss_setup_partial_planes(struct omap_dss_device *dssdev,
+		u16 *xi, u16 *yi, u16 *wi, u16 *hi)
+{
+	struct overlay_cache_data *oc;
+	struct manager_cache_data *mc;
+	const int num_ovls = ARRAY_SIZE(dss_cache.overlay_cache);
+	struct omap_overlay_manager *mgr;
+	int i;
+	u16 x, y, w, h;
+	unsigned long flags;
+
+	x = *xi;
+	y = *yi;
+	w = *wi;
+	h = *hi;
+
+	DSSDBG("dss_setup_partial_planes %d,%d %dx%d\n",
+		*xi, *yi, *wi, *hi);
+
+	mgr = dssdev->manager;
+
+	if (!mgr) {
+		DSSDBG("no manager\n");
+		return;
+	}
+
+	spin_lock_irqsave(&dss_cache.lock, flags);
+
+	/* A scaled overlay has to be shown in full, so look for scaled
+	 * overlays and enlarge the update area to cover them.
+	 * Also mark the overlay caches dirty */
+	for (i = 0; i < num_ovls; ++i) {
+		unsigned x1, y1, x2, y2;
+		unsigned outw, outh;
+
+		oc = &dss_cache.overlay_cache[i];
+
+		if (oc->channel != mgr->id)
+			continue;
+
+		oc->dirty = true;
+
+		if (!oc->enabled)
+			continue;
+
+		if (!dispc_is_overlay_scaled(oc))
+			continue;
+
+		outw = oc->out_width == 0 ? oc->width : oc->out_width;
+		outh = oc->out_height == 0 ? oc->height : oc->out_height;
+
+		/* is the overlay outside the update region? */
+		if (!rectangle_intersects(x, y, w, h,
+					oc->pos_x, oc->pos_y,
+					outw, outh))
+			continue;
+
+		/* is the overlay totally inside the update region? */
+		if (rectangle_subset(oc->pos_x, oc->pos_y, outw, outh,
+					x, y, w, h))
+			continue;
+
+		if (x > oc->pos_x)
+			x1 = oc->pos_x;
+		else
+			x1 = x;
+
+		if (y > oc->pos_y)
+			y1 = oc->pos_y;
+		else
+			y1 = y;
+
+		if ((x + w) < (oc->pos_x + outw))
+			x2 = oc->pos_x + outw;
+		else
+			x2 = x + w;
+
+		if ((y + h) < (oc->pos_y + outh))
+			y2 = oc->pos_y + outh;
+		else
+			y2 = y + h;
+
+		x = x1;
+		y = y1;
+		w = x2 - x1;
+		h = y2 - y1;
+
+		DSSDBG("changing upd area due to ovl(%d) scaling %d,%d %dx%d\n",
+				i, x, y, w, h);
+	}
+
+	mc = &dss_cache.manager_cache[mgr->id];
+	mc->do_manual_update = true;
+	mc->x = x;
+	mc->y = y;
+	mc->w = w;
+	mc->h = h;
+
+	configure_dispc();
+
+	mc->do_manual_update = false;
+
+	spin_unlock_irqrestore(&dss_cache.lock, flags);
+
+	*xi = x;
+	*yi = y;
+	*wi = w;
+	*hi = h;
+}
+
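+/* Typical manual update flow (illustrative): the panel driver first calls
+ * dss_setup_partial_planes() to commit the caches and clamp the update
+ * region, programs the panel's update window, and then calls
+ * dss_start_update() below to start pushing the frame. */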
+void dss_start_update(struct omap_dss_device *dssdev)
+{
+	struct manager_cache_data *mc;
+	struct overlay_cache_data *oc;
+	const int num_ovls = ARRAY_SIZE(dss_cache.overlay_cache);
+	const int num_mgrs = ARRAY_SIZE(dss_cache.manager_cache);
+	struct omap_overlay_manager *mgr;
+	int i;
+
+	mgr = dssdev->manager;
+
+	for (i = 0; i < num_ovls; ++i) {
+		oc = &dss_cache.overlay_cache[i];
+		if (oc->channel != mgr->id)
+			continue;
+
+		oc->shadow_dirty = false;
+	}
+
+	for (i = 0; i < num_mgrs; ++i) {
+		mc = &dss_cache.manager_cache[i];
+		if (mgr->id != i)
+			continue;
+
+		mc->shadow_dirty = false;
+	}
+
+	dispc_enable_lcd_out(1);
+}
+
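+/* ISR for VSYNC/EVSYNC. Clears shadow_dirty for overlays/managers whose GO
+ * bit has cleared, retries configure_dispc() for still-dirty cache entries,
+ * and unregisters itself once no manager is busy. */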
+static void dss_apply_irq_handler(void *data, u32 mask)
+{
+	struct manager_cache_data *mc;
+	struct overlay_cache_data *oc;
+	const int num_ovls = ARRAY_SIZE(dss_cache.overlay_cache);
+	const int num_mgrs = ARRAY_SIZE(dss_cache.manager_cache);
+	int i, r;
+	bool mgr_busy[2];
+
+	mgr_busy[0] = dispc_go_busy(0);
+	mgr_busy[1] = dispc_go_busy(1);
+
+	spin_lock(&dss_cache.lock);
+
+	for (i = 0; i < num_ovls; ++i) {
+		oc = &dss_cache.overlay_cache[i];
+		if (!mgr_busy[oc->channel])
+			oc->shadow_dirty = false;
+	}
+
+	for (i = 0; i < num_mgrs; ++i) {
+		mc = &dss_cache.manager_cache[i];
+		if (!mgr_busy[i])
+			mc->shadow_dirty = false;
+	}
+
+	r = configure_dispc();
+	if (r == 1)
+		goto end;
+
+	/* re-read busy flags */
+	mgr_busy[0] = dispc_go_busy(0);
+	mgr_busy[1] = dispc_go_busy(1);
+
+	/* keep running as long as there are busy managers, so that
+	 * we can collect overlay-applied information */
+	for (i = 0; i < num_mgrs; ++i) {
+		if (mgr_busy[i])
+			goto end;
+	}
+
+	omap_dispc_unregister_isr(dss_apply_irq_handler, NULL,
+			DISPC_IRQ_VSYNC	| DISPC_IRQ_EVSYNC_ODD |
+			DISPC_IRQ_EVSYNC_EVEN);
+	dss_cache.irq_enabled = false;
+
+end:
+	spin_unlock(&dss_cache.lock);
+}
+
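+/* Collect the dirty overlay/manager info into dss_cache, recalculate the
+ * fifo thresholds, and try to write the cache to the shadow registers.
+ * Whatever cannot be written now is flushed later from
+ * dss_apply_irq_handler(). */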
+static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
+{
+	struct overlay_cache_data *oc;
+	struct manager_cache_data *mc;
+	int i;
+	struct omap_overlay *ovl;
+	int num_planes_enabled = 0;
+	bool use_fifomerge;
+	unsigned long flags;
+	int r;
+
+	DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);
+
+	spin_lock_irqsave(&dss_cache.lock, flags);
+
+	/* Configure overlays */
+	for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+		struct omap_dss_device *dssdev;
+
+		ovl = omap_dss_get_overlay(i);
+
+		if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
+			continue;
+
+		oc = &dss_cache.overlay_cache[ovl->id];
+
+		if (!overlay_enabled(ovl)) {
+			if (oc->enabled) {
+				oc->enabled = false;
+				oc->dirty = true;
+			}
+			continue;
+		}
+
+		if (!ovl->info_dirty) {
+			if (oc->enabled)
+				++num_planes_enabled;
+			continue;
+		}
+
+		dssdev = ovl->manager->device;
+
+		if (dss_check_overlay(ovl, dssdev)) {
+			if (oc->enabled) {
+				oc->enabled = false;
+				oc->dirty = true;
+			}
+			continue;
+		}
+
+		ovl->info_dirty = false;
+		oc->dirty = true;
+
+		oc->paddr = ovl->info.paddr;
+		oc->vaddr = ovl->info.vaddr;
+		oc->screen_width = ovl->info.screen_width;
+		oc->width = ovl->info.width;
+		oc->height = ovl->info.height;
+		oc->color_mode = ovl->info.color_mode;
+		oc->rotation = ovl->info.rotation;
+		oc->rotation_type = ovl->info.rotation_type;
+		oc->mirror = ovl->info.mirror;
+		oc->pos_x = ovl->info.pos_x;
+		oc->pos_y = ovl->info.pos_y;
+		oc->out_width = ovl->info.out_width;
+		oc->out_height = ovl->info.out_height;
+		oc->global_alpha = ovl->info.global_alpha;
+
+		oc->replication =
+			dss_use_replication(dssdev, ovl->info.color_mode);
+
+		oc->ilace = dssdev->type == OMAP_DISPLAY_TYPE_VENC;
+
+		oc->channel = ovl->manager->id;
+
+		oc->enabled = true;
+
+		oc->manual_update =
+			dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE &&
+			dssdev->get_update_mode(dssdev) != OMAP_DSS_UPDATE_AUTO;
+
+		++num_planes_enabled;
+	}
+
+	/* Configure managers */
+	list_for_each_entry(mgr, &manager_list, list) {
+		struct omap_dss_device *dssdev;
+
+		if (!(mgr->caps & OMAP_DSS_OVL_MGR_CAP_DISPC))
+			continue;
+
+		mc = &dss_cache.manager_cache[mgr->id];
+
+		if (mgr->device_changed) {
+			mgr->device_changed = false;
+			mgr->info_dirty = true;
+		}
+
+		if (!mgr->info_dirty)
+			continue;
+
+		if (!mgr->device)
+			continue;
+
+		dssdev = mgr->device;
+
+		mgr->info_dirty = false;
+		mc->dirty = true;
+
+		mc->default_color = mgr->info.default_color;
+		mc->trans_key_type = mgr->info.trans_key_type;
+		mc->trans_key = mgr->info.trans_key;
+		mc->trans_enabled = mgr->info.trans_enabled;
+		mc->alpha_enabled = mgr->info.alpha_enabled;
+
+		mc->manual_upd_display =
+			dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
+
+		mc->manual_update =
+			dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE &&
+			dssdev->get_update_mode(dssdev) != OMAP_DSS_UPDATE_AUTO;
+	}
+
+	/* XXX TODO: Try to get fifomerge working. The problem is that it
+	 * affects both managers, not individually but at the same time. This
+	 * means the change has to be well synchronized. I guess the proper way
+	 * is to have a two step process for fifo merge:
+	 *        fifomerge enable:
+	 *             1. disable other planes, leaving one plane enabled
+	 *             2. wait until the planes are disabled on HW
+	 *             3. config merged fifo thresholds, enable fifomerge
+	 *        fifomerge disable:
+	 *             1. config unmerged fifo thresholds, disable fifomerge
+	 *             2. wait until fifo changes are in HW
+	 *             3. enable planes
+	 */
+	use_fifomerge = false;
+
+	/* Configure overlay fifos */
+	for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+		struct omap_dss_device *dssdev;
+		u32 size;
+
+		ovl = omap_dss_get_overlay(i);
+
+		if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
+			continue;
+
+		oc = &dss_cache.overlay_cache[ovl->id];
+
+		if (!oc->enabled)
+			continue;
+
+		dssdev = ovl->manager->device;
+
+		size = dispc_get_plane_fifo_size(ovl->id);
+		if (use_fifomerge)
+			size *= 3;
+
+		switch (dssdev->type) {
+		case OMAP_DISPLAY_TYPE_DPI:
+		case OMAP_DISPLAY_TYPE_DBI:
+		case OMAP_DISPLAY_TYPE_SDI:
+		case OMAP_DISPLAY_TYPE_VENC:
+			default_get_overlay_fifo_thresholds(ovl->id, size,
+					&oc->burst_size, &oc->fifo_low,
+					&oc->fifo_high);
+			break;
+#ifdef CONFIG_OMAP2_DSS_DSI
+		case OMAP_DISPLAY_TYPE_DSI:
+			dsi_get_overlay_fifo_thresholds(ovl->id, size,
+					&oc->burst_size, &oc->fifo_low,
+					&oc->fifo_high);
+			break;
+#endif
+		default:
+			BUG();
+		}
+	}
+
+	r = 0;
+	dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+	if (!dss_cache.irq_enabled) {
+		r = omap_dispc_register_isr(dss_apply_irq_handler, NULL,
+				DISPC_IRQ_VSYNC	| DISPC_IRQ_EVSYNC_ODD |
+				DISPC_IRQ_EVSYNC_EVEN);
+		dss_cache.irq_enabled = true;
+	}
+	configure_dispc();
+	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+	spin_unlock_irqrestore(&dss_cache.lock, flags);
+
+	return r;
+}
+
+static int dss_check_manager(struct omap_overlay_manager *mgr)
+{
+	/* OMAP supports alpha blending only together with the gfx-destination
+	 * transparency color key. See TRM 15.4.2.4.2.2 Alpha Mode */
+
+	if (mgr->info.alpha_enabled && mgr->info.trans_enabled &&
+			mgr->info.trans_key_type != OMAP_DSS_COLOR_KEY_GFX_DST)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int omap_dss_mgr_set_info(struct omap_overlay_manager *mgr,
+		struct omap_overlay_manager_info *info)
+{
+	int r;
+	struct omap_overlay_manager_info old_info;
+
+	old_info = mgr->info;
+	mgr->info = *info;
+
+	r = dss_check_manager(mgr);
+	if (r) {
+		mgr->info = old_info;
+		return r;
+	}
+
+	mgr->info_dirty = true;
+
+	return 0;
+}
+
+static void omap_dss_mgr_get_info(struct omap_overlay_manager *mgr,
+		struct omap_overlay_manager_info *info)
+{
+	*info = mgr->info;
+}
+
+static void omap_dss_add_overlay_manager(struct omap_overlay_manager *manager)
+{
+	++num_managers;
+	list_add_tail(&manager->list, &manager_list);
+}
+
+int dss_init_overlay_managers(struct platform_device *pdev)
+{
+	int i, r;
+
+	spin_lock_init(&dss_cache.lock);
+
+	INIT_LIST_HEAD(&manager_list);
+
+	num_managers = 0;
+
+	for (i = 0; i < 2; ++i) {
+		struct omap_overlay_manager *mgr;
+		mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+
+		BUG_ON(mgr == NULL);
+
+		switch (i) {
+		case 0:
+			mgr->name = "lcd";
+			mgr->id = OMAP_DSS_CHANNEL_LCD;
+			mgr->supported_displays =
+				OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
+				OMAP_DISPLAY_TYPE_SDI | OMAP_DISPLAY_TYPE_DSI;
+			break;
+		case 1:
+			mgr->name = "tv";
+			mgr->id = OMAP_DSS_CHANNEL_DIGIT;
+			mgr->supported_displays = OMAP_DISPLAY_TYPE_VENC;
+			break;
+		}
+
+		mgr->set_device = &omap_dss_set_device;
+		mgr->unset_device = &omap_dss_unset_device;
+		mgr->apply = &omap_dss_mgr_apply;
+		mgr->set_manager_info = &omap_dss_mgr_set_info;
+		mgr->get_manager_info = &omap_dss_mgr_get_info;
+		mgr->wait_for_go = &dss_mgr_wait_for_go;
+
+		mgr->caps = OMAP_DSS_OVL_MGR_CAP_DISPC;
+
+		dss_overlay_setup_dispc_manager(mgr);
+
+		omap_dss_add_overlay_manager(mgr);
+
+		r = kobject_init_and_add(&mgr->kobj, &manager_ktype,
+				&pdev->dev.kobj, "manager%d", i);
+
+		if (r) {
+			DSSERR("failed to create sysfs file\n");
+			continue;
+		}
+	}
+
+#ifdef L4_EXAMPLE
+	{
+		int omap_dss_mgr_apply_l4(struct omap_overlay_manager *mgr)
+		{
+			DSSDBG("omap_dss_mgr_apply_l4(%s)\n", mgr->name);
+
+			return 0;
+		}
+
+		struct omap_overlay_manager *mgr;
+		mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+
+		BUG_ON(mgr == NULL);
+
+		mgr->name = "l4";
+		mgr->supported_displays =
+			OMAP_DISPLAY_TYPE_DBI | OMAP_DISPLAY_TYPE_DSI;
+
+		mgr->set_device = &omap_dss_set_device;
+		mgr->unset_device = &omap_dss_unset_device;
+		mgr->apply = &omap_dss_mgr_apply_l4;
+		mgr->set_manager_info = &omap_dss_mgr_set_info;
+		mgr->get_manager_info = &omap_dss_mgr_get_info;
+
+		dss_overlay_setup_l4_manager(mgr);
+
+		omap_dss_add_overlay_manager(mgr);
+
+		r = kobject_init_and_add(&mgr->kobj, &manager_ktype,
+				&pdev->dev.kobj, "managerl4");
+
+		if (r)
+			DSSERR("failed to create sysfs file\n");
+	}
+#endif
+
+	return 0;
+}
+
+void dss_uninit_overlay_managers(struct platform_device *pdev)
+{
+	struct omap_overlay_manager *mgr;
+
+	while (!list_empty(&manager_list)) {
+		mgr = list_first_entry(&manager_list,
+				struct omap_overlay_manager, list);
+		list_del(&mgr->list);
+		kobject_del(&mgr->kobj);
+		kobject_put(&mgr->kobj);
+		kfree(mgr);
+	}
+
+	num_managers = 0;
+}
+
+int omap_dss_get_num_overlay_managers(void)
+{
+	return num_managers;
+}
+EXPORT_SYMBOL(omap_dss_get_num_overlay_managers);
+
+struct omap_overlay_manager *omap_dss_get_overlay_manager(int num)
+{
+	int i = 0;
+	struct omap_overlay_manager *mgr;
+
+	list_for_each_entry(mgr, &manager_list, list) {
+		if (i++ == num)
+			return mgr;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(omap_dss_get_overlay_manager);
+
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
new file mode 100644
index 0000000..b7f9a73
--- /dev/null
+++ b/drivers/video/omap2/dss/overlay.c
@@ -0,0 +1,680 @@
+/*
+ * linux/drivers/video/omap2/dss/overlay.c
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "OVERLAY"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+#include <plat/display.h>
+#include <plat/cpu.h>
+
+#include "dss.h"
+
+static int num_overlays;
+static struct list_head overlay_list;
+
+static ssize_t overlay_name_show(struct omap_overlay *ovl, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", ovl->name);
+}
+
+static ssize_t overlay_manager_show(struct omap_overlay *ovl, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			ovl->manager ? ovl->manager->name : "<none>");
+}
+
+static ssize_t overlay_manager_store(struct omap_overlay *ovl, const char *buf,
+		size_t size)
+{
+	int i, r;
+	struct omap_overlay_manager *mgr = NULL;
+	struct omap_overlay_manager *old_mgr;
+	int len = size;
+
+	if (buf[size-1] == '\n')
+		--len;
+
+	if (len > 0) {
+		for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
+			mgr = omap_dss_get_overlay_manager(i);
+
+			if (strncmp(buf, mgr->name, len) == 0)
+				break;
+
+			mgr = NULL;
+		}
+	}
+
+	if (len > 0 && mgr == NULL)
+		return -EINVAL;
+
+	if (mgr)
+		DSSDBG("manager %s found\n", mgr->name);
+
+	if (mgr == ovl->manager)
+		return size;
+
+	old_mgr = ovl->manager;
+
+	/* detach old manager */
+	if (old_mgr) {
+		r = ovl->unset_manager(ovl);
+		if (r) {
+			DSSERR("detach failed\n");
+			return r;
+		}
+
+		r = old_mgr->apply(old_mgr);
+		if (r)
+			return r;
+	}
+
+	if (mgr) {
+		r = ovl->set_manager(ovl, mgr);
+		if (r) {
+			DSSERR("Failed to attach overlay\n");
+			return r;
+		}
+
+		r = mgr->apply(mgr);
+		if (r)
+			return r;
+	}
+
+	return size;
+}
+
+static ssize_t overlay_input_size_show(struct omap_overlay *ovl, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d,%d\n",
+			ovl->info.width, ovl->info.height);
+}
+
+static ssize_t overlay_screen_width_show(struct omap_overlay *ovl, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", ovl->info.screen_width);
+}
+
+static ssize_t overlay_position_show(struct omap_overlay *ovl, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d,%d\n",
+			ovl->info.pos_x, ovl->info.pos_y);
+}
+
+static ssize_t overlay_position_store(struct omap_overlay *ovl,
+		const char *buf, size_t size)
+{
+	int r;
+	char *last;
+	struct omap_overlay_info info;
+
+	ovl->get_overlay_info(ovl, &info);
+
+	info.pos_x = simple_strtoul(buf, &last, 10);
+	++last;
+	if (last - buf >= size)
+		return -EINVAL;
+
+	info.pos_y = simple_strtoul(last, &last, 10);
+
+	r = ovl->set_overlay_info(ovl, &info);
+	if (r)
+		return r;
+
+	if (ovl->manager) {
+		r = ovl->manager->apply(ovl->manager);
+		if (r)
+			return r;
+	}
+
+	return size;
+}
+
+static ssize_t overlay_output_size_show(struct omap_overlay *ovl, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d,%d\n",
+			ovl->info.out_width, ovl->info.out_height);
+}
+
+static ssize_t overlay_output_size_store(struct omap_overlay *ovl,
+		const char *buf, size_t size)
+{
+	int r;
+	char *last;
+	struct omap_overlay_info info;
+
+	ovl->get_overlay_info(ovl, &info);
+
+	info.out_width = simple_strtoul(buf, &last, 10);
+	++last;
+	if (last - buf >= size)
+		return -EINVAL;
+
+	info.out_height = simple_strtoul(last, &last, 10);
+
+	r = ovl->set_overlay_info(ovl, &info);
+	if (r)
+		return r;
+
+	if (ovl->manager) {
+		r = ovl->manager->apply(ovl->manager);
+		if (r)
+			return r;
+	}
+
+	return size;
+}
+
+static ssize_t overlay_enabled_show(struct omap_overlay *ovl, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", ovl->info.enabled);
+}
+
+static ssize_t overlay_enabled_store(struct omap_overlay *ovl, const char *buf,
+		size_t size)
+{
+	int r;
+	struct omap_overlay_info info;
+
+	ovl->get_overlay_info(ovl, &info);
+
+	info.enabled = simple_strtoul(buf, NULL, 10);
+
+	r = ovl->set_overlay_info(ovl, &info);
+	if (r)
+		return r;
+
+	if (ovl->manager) {
+		r = ovl->manager->apply(ovl->manager);
+		if (r)
+			return r;
+	}
+
+	return size;
+}
+
+static ssize_t overlay_global_alpha_show(struct omap_overlay *ovl, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			ovl->info.global_alpha);
+}
+
+static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl,
+		const char *buf, size_t size)
+{
+	int r;
+	struct omap_overlay_info info;
+
+	ovl->get_overlay_info(ovl, &info);
+
+	/* The video1 plane does not support global alpha, so force it
+	 * to 255, i.e. fully opaque
+	 */
+	if (ovl->id == OMAP_DSS_VIDEO1)
+		info.global_alpha = 255;
+	else
+		info.global_alpha = simple_strtoul(buf, NULL, 10);
+
+	r = ovl->set_overlay_info(ovl, &info);
+	if (r)
+		return r;
+
+	if (ovl->manager) {
+		r = ovl->manager->apply(ovl->manager);
+		if (r)
+			return r;
+	}
+
+	return size;
+}
+
+struct overlay_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct omap_overlay *, char *);
+	ssize_t	(*store)(struct omap_overlay *, const char *, size_t);
+};
+
+#define OVERLAY_ATTR(_name, _mode, _show, _store) \
+	struct overlay_attribute overlay_attr_##_name = \
+	__ATTR(_name, _mode, _show, _store)
+
+static OVERLAY_ATTR(name, S_IRUGO, overlay_name_show, NULL);
+static OVERLAY_ATTR(manager, S_IRUGO|S_IWUSR,
+		overlay_manager_show, overlay_manager_store);
+static OVERLAY_ATTR(input_size, S_IRUGO, overlay_input_size_show, NULL);
+static OVERLAY_ATTR(screen_width, S_IRUGO, overlay_screen_width_show, NULL);
+static OVERLAY_ATTR(position, S_IRUGO|S_IWUSR,
+		overlay_position_show, overlay_position_store);
+static OVERLAY_ATTR(output_size, S_IRUGO|S_IWUSR,
+		overlay_output_size_show, overlay_output_size_store);
+static OVERLAY_ATTR(enabled, S_IRUGO|S_IWUSR,
+		overlay_enabled_show, overlay_enabled_store);
+static OVERLAY_ATTR(global_alpha, S_IRUGO|S_IWUSR,
+		overlay_global_alpha_show, overlay_global_alpha_store);
+
+static struct attribute *overlay_sysfs_attrs[] = {
+	&overlay_attr_name.attr,
+	&overlay_attr_manager.attr,
+	&overlay_attr_input_size.attr,
+	&overlay_attr_screen_width.attr,
+	&overlay_attr_position.attr,
+	&overlay_attr_output_size.attr,
+	&overlay_attr_enabled.attr,
+	&overlay_attr_global_alpha.attr,
+	NULL
+};
+
+static ssize_t overlay_attr_show(struct kobject *kobj, struct attribute *attr,
+		char *buf)
+{
+	struct omap_overlay *overlay;
+	struct overlay_attribute *overlay_attr;
+
+	overlay = container_of(kobj, struct omap_overlay, kobj);
+	overlay_attr = container_of(attr, struct overlay_attribute, attr);
+
+	if (!overlay_attr->show)
+		return -ENOENT;
+
+	return overlay_attr->show(overlay, buf);
+}
+
+static ssize_t overlay_attr_store(struct kobject *kobj, struct attribute *attr,
+		const char *buf, size_t size)
+{
+	struct omap_overlay *overlay;
+	struct overlay_attribute *overlay_attr;
+
+	overlay = container_of(kobj, struct omap_overlay, kobj);
+	overlay_attr = container_of(attr, struct overlay_attribute, attr);
+
+	if (!overlay_attr->store)
+		return -ENOENT;
+
+	return overlay_attr->store(overlay, buf, size);
+}
+
+static struct sysfs_ops overlay_sysfs_ops = {
+	.show = overlay_attr_show,
+	.store = overlay_attr_store,
+};
+
+static struct kobj_type overlay_ktype = {
+	.sysfs_ops = &overlay_sysfs_ops,
+	.default_attrs = overlay_sysfs_attrs,
+};
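+
+/*
+ * The attributes above appear in sysfs under the omapdss platform device,
+ * e.g. /sys/devices/platform/omapdss/overlay1/ (the exact path depends on
+ * the platform device). An illustrative sequence to move, resize and enable
+ * a video overlay from userspace:
+ *   # echo "160,120" > overlay1/position
+ *   # echo "320,240" > overlay1/output_size
+ *   # echo 1 > overlay1/enabled
+ */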
+
+/* Check if overlay parameters are compatible with display */
+int dss_check_overlay(struct omap_overlay *ovl, struct omap_dss_device *dssdev)
+{
+	struct omap_overlay_info *info;
+	u16 outw, outh;
+	u16 dw, dh;
+
+	if (!dssdev)
+		return 0;
+
+	if (!ovl->info.enabled)
+		return 0;
+
+	info = &ovl->info;
+
+	if (info->paddr == 0) {
+		DSSDBG("check_overlay failed: paddr 0\n");
+		return -EINVAL;
+	}
+
+	dssdev->get_resolution(dssdev, &dw, &dh);
+
+	DSSDBG("check_overlay %d: (%d,%d %dx%d -> %dx%d) disp (%dx%d)\n",
+			ovl->id,
+			info->pos_x, info->pos_y,
+			info->width, info->height,
+			info->out_width, info->out_height,
+			dw, dh);
+
+	if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
+		outw = info->width;
+		outh = info->height;
+	} else {
+		if (info->out_width == 0)
+			outw = info->width;
+		else
+			outw = info->out_width;
+
+		if (info->out_height == 0)
+			outh = info->height;
+		else
+			outh = info->out_height;
+	}
+
+	if (dw < info->pos_x + outw) {
+		DSSDBG("check_overlay failed 1: %d < %d + %d\n",
+				dw, info->pos_x, outw);
+		return -EINVAL;
+	}
+
+	if (dh < info->pos_y + outh) {
+		DSSDBG("check_overlay failed 2: %d < %d + %d\n",
+				dh, info->pos_y, outh);
+		return -EINVAL;
+	}
+
+	if ((ovl->supported_modes & info->color_mode) == 0) {
+		DSSERR("overlay doesn't support mode %d\n", info->color_mode);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int dss_ovl_set_overlay_info(struct omap_overlay *ovl,
+		struct omap_overlay_info *info)
+{
+	int r;
+	struct omap_overlay_info old_info;
+
+	old_info = ovl->info;
+	ovl->info = *info;
+
+	if (ovl->manager) {
+		r = dss_check_overlay(ovl, ovl->manager->device);
+		if (r) {
+			ovl->info = old_info;
+			return r;
+		}
+	}
+
+	ovl->info_dirty = true;
+
+	return 0;
+}
+
+static void dss_ovl_get_overlay_info(struct omap_overlay *ovl,
+		struct omap_overlay_info *info)
+{
+	*info = ovl->info;
+}
+
+static int dss_ovl_wait_for_go(struct omap_overlay *ovl)
+{
+	return dss_mgr_wait_for_go_ovl(ovl);
+}
+
+static int omap_dss_set_manager(struct omap_overlay *ovl,
+		struct omap_overlay_manager *mgr)
+{
+	if (!mgr)
+		return -EINVAL;
+
+	if (ovl->manager) {
+		DSSERR("overlay '%s' already has a manager '%s'\n",
+				ovl->name, ovl->manager->name);
+		return -EINVAL;
+	}
+
+	if (ovl->info.enabled) {
+		DSSERR("overlay has to be disabled to change the manager\n");
+		return -EINVAL;
+	}
+
+	ovl->manager = mgr;
+
+	dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+	/* XXX: on manual update display, in auto update mode, a bug happens
+	 * here. When an overlay is first enabled on LCD, then it's disabled,
+	 * and the manager is changed to TV, we sometimes get SYNC_LOST_DIGIT
+	 * errors. Waiting before changing the channel_out fixes it. I'm
+	 * guessing that the overlay is still somehow being used for the LCD,
+	 * but I don't understand how or why. */
+	msleep(40);
+	dispc_set_channel_out(ovl->id, mgr->id);
+	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+	return 0;
+}
+
+static int omap_dss_unset_manager(struct omap_overlay *ovl)
+{
+	int r;
+
+	if (!ovl->manager) {
+		DSSERR("failed to detach overlay: manager not set\n");
+		return -EINVAL;
+	}
+
+	if (ovl->info.enabled) {
+		DSSERR("overlay has to be disabled to unset the manager\n");
+		return -EINVAL;
+	}
+
+	r = ovl->wait_for_go(ovl);
+	if (r)
+		return r;
+
+	ovl->manager = NULL;
+
+	return 0;
+}
+
+int omap_dss_get_num_overlays(void)
+{
+	return num_overlays;
+}
+EXPORT_SYMBOL(omap_dss_get_num_overlays);
+
+struct omap_overlay *omap_dss_get_overlay(int num)
+{
+	int i = 0;
+	struct omap_overlay *ovl;
+
+	list_for_each_entry(ovl, &overlay_list, list) {
+		if (i++ == num)
+			return ovl;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(omap_dss_get_overlay);
+
+static void omap_dss_add_overlay(struct omap_overlay *overlay)
+{
+	++num_overlays;
+	list_add_tail(&overlay->list, &overlay_list);
+}
+
+static struct omap_overlay *dispc_overlays[3];
+
+void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr)
+{
+	mgr->num_overlays = 3;
+	mgr->overlays = dispc_overlays;
+}
+
+#ifdef L4_EXAMPLE
+static struct omap_overlay *l4_overlays[1];
+void dss_overlay_setup_l4_manager(struct omap_overlay_manager *mgr)
+{
+	mgr->num_overlays = 1;
+	mgr->overlays = l4_overlays;
+}
+#endif
+
+void dss_init_overlays(struct platform_device *pdev)
+{
+	int i, r;
+
+	INIT_LIST_HEAD(&overlay_list);
+
+	num_overlays = 0;
+
+	for (i = 0; i < 3; ++i) {
+		struct omap_overlay *ovl;
+		ovl = kzalloc(sizeof(*ovl), GFP_KERNEL);
+
+		BUG_ON(ovl == NULL);
+
+		switch (i) {
+		case 0:
+			ovl->name = "gfx";
+			ovl->id = OMAP_DSS_GFX;
+			ovl->supported_modes = cpu_is_omap34xx() ?
+				OMAP_DSS_COLOR_GFX_OMAP3 :
+				OMAP_DSS_COLOR_GFX_OMAP2;
+			ovl->caps = OMAP_DSS_OVL_CAP_DISPC;
+			ovl->info.global_alpha = 255;
+			break;
+		case 1:
+			ovl->name = "vid1";
+			ovl->id = OMAP_DSS_VIDEO1;
+			ovl->supported_modes = cpu_is_omap34xx() ?
+				OMAP_DSS_COLOR_VID1_OMAP3 :
+				OMAP_DSS_COLOR_VID_OMAP2;
+			ovl->caps = OMAP_DSS_OVL_CAP_SCALE |
+				OMAP_DSS_OVL_CAP_DISPC;
+			ovl->info.global_alpha = 255;
+			break;
+		case 2:
+			ovl->name = "vid2";
+			ovl->id = OMAP_DSS_VIDEO2;
+			ovl->supported_modes = cpu_is_omap34xx() ?
+				OMAP_DSS_COLOR_VID2_OMAP3 :
+				OMAP_DSS_COLOR_VID_OMAP2;
+			ovl->caps = OMAP_DSS_OVL_CAP_SCALE |
+				OMAP_DSS_OVL_CAP_DISPC;
+			ovl->info.global_alpha = 255;
+			break;
+		}
+
+		ovl->set_manager = &omap_dss_set_manager;
+		ovl->unset_manager = &omap_dss_unset_manager;
+		ovl->set_overlay_info = &dss_ovl_set_overlay_info;
+		ovl->get_overlay_info = &dss_ovl_get_overlay_info;
+		ovl->wait_for_go = &dss_ovl_wait_for_go;
+
+		omap_dss_add_overlay(ovl);
+
+		r = kobject_init_and_add(&ovl->kobj, &overlay_ktype,
+				&pdev->dev.kobj, "overlay%d", i);
+
+		if (r) {
+			DSSERR("failed to create sysfs file\n");
+			continue;
+		}
+
+		dispc_overlays[i] = ovl;
+	}
+
+#ifdef L4_EXAMPLE
+	{
+		struct omap_overlay *ovl;
+		ovl = kzalloc(sizeof(*ovl), GFP_KERNEL);
+
+		BUG_ON(ovl == NULL);
+
+		ovl->name = "l4";
+		ovl->supported_modes = OMAP_DSS_COLOR_RGB24U;
+
+		ovl->set_manager = &omap_dss_set_manager;
+		ovl->unset_manager = &omap_dss_unset_manager;
+		ovl->set_overlay_info = &dss_ovl_set_overlay_info;
+		ovl->get_overlay_info = &dss_ovl_get_overlay_info;
+
+		omap_dss_add_overlay(ovl);
+
+		r = kobject_init_and_add(&ovl->kobj, &overlay_ktype,
+				&pdev->dev.kobj, "overlayl4");
+
+		if (r)
+			DSSERR("failed to create sysfs file\n");
+
+		l4_overlays[0] = ovl;
+	}
+#endif
+}
+
+/* Connect overlays to the new device, if not already connected. If force is
+ * set, connect even if a device is already connected. */
+void dss_recheck_connections(struct omap_dss_device *dssdev, bool force)
+{
+	int i;
+	struct omap_overlay_manager *lcd_mgr;
+	struct omap_overlay_manager *tv_mgr;
+	struct omap_overlay_manager *mgr = NULL;
+
+	lcd_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_LCD);
+	tv_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_TV);
+
+	if (dssdev->type != OMAP_DISPLAY_TYPE_VENC) {
+		if (!lcd_mgr->device || force) {
+			if (lcd_mgr->device)
+				lcd_mgr->unset_device(lcd_mgr);
+			lcd_mgr->set_device(lcd_mgr, dssdev);
+			mgr = lcd_mgr;
+		}
+	}
+
+	if (dssdev->type == OMAP_DISPLAY_TYPE_VENC) {
+		if (!tv_mgr->device || force) {
+			if (tv_mgr->device)
+				tv_mgr->unset_device(tv_mgr);
+			tv_mgr->set_device(tv_mgr, dssdev);
+			mgr = tv_mgr;
+		}
+	}
+
+	if (mgr) {
+		for (i = 0; i < 3; i++) {
+			struct omap_overlay *ovl;
+			ovl = omap_dss_get_overlay(i);
+			if (!ovl->manager || force) {
+				if (ovl->manager)
+					omap_dss_unset_manager(ovl);
+				omap_dss_set_manager(ovl, mgr);
+			}
+		}
+	}
+}
+
+void dss_uninit_overlays(struct platform_device *pdev)
+{
+	struct omap_overlay *ovl;
+
+	while (!list_empty(&overlay_list)) {
+		ovl = list_first_entry(&overlay_list,
+				struct omap_overlay, list);
+		list_del(&ovl->list);
+		kobject_del(&ovl->kobj);
+		kobject_put(&ovl->kobj);
+		kfree(ovl);
+	}
+
+	num_overlays = 0;
+}
+
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
new file mode 100644
index 0000000..d0b3006
--- /dev/null
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -0,0 +1,1309 @@
+/*
+ * linux/drivers/video/omap2/dss/rfbi.c
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "RFBI"
+
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/kfifo.h>
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
+#include <linux/seq_file.h>
+
+#include <plat/display.h>
+#include "dss.h"
+
+/*#define MEASURE_PERF*/
+
+#define RFBI_BASE               0x48050800
+
+struct rfbi_reg { u16 idx; };
+
+#define RFBI_REG(idx)		((const struct rfbi_reg) { idx })
+
+#define RFBI_REVISION		RFBI_REG(0x0000)
+#define RFBI_SYSCONFIG		RFBI_REG(0x0010)
+#define RFBI_SYSSTATUS		RFBI_REG(0x0014)
+#define RFBI_CONTROL		RFBI_REG(0x0040)
+#define RFBI_PIXEL_CNT		RFBI_REG(0x0044)
+#define RFBI_LINE_NUMBER	RFBI_REG(0x0048)
+#define RFBI_CMD		RFBI_REG(0x004c)
+#define RFBI_PARAM		RFBI_REG(0x0050)
+#define RFBI_DATA		RFBI_REG(0x0054)
+#define RFBI_READ		RFBI_REG(0x0058)
+#define RFBI_STATUS		RFBI_REG(0x005c)
+
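+/* Per-module registers: the RFBI has two modules (n = 0 or 1), each with
+ * its own configuration and timing registers at a 0x18 byte stride. */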
+#define RFBI_CONFIG(n)		RFBI_REG(0x0060 + (n)*0x18)
+#define RFBI_ONOFF_TIME(n)	RFBI_REG(0x0064 + (n)*0x18)
+#define RFBI_CYCLE_TIME(n)	RFBI_REG(0x0068 + (n)*0x18)
+#define RFBI_DATA_CYCLE1(n)	RFBI_REG(0x006c + (n)*0x18)
+#define RFBI_DATA_CYCLE2(n)	RFBI_REG(0x0070 + (n)*0x18)
+#define RFBI_DATA_CYCLE3(n)	RFBI_REG(0x0074 + (n)*0x18)
+
+#define RFBI_VSYNC_WIDTH	RFBI_REG(0x0090)
+#define RFBI_HSYNC_WIDTH	RFBI_REG(0x0094)
+
+#define RFBI_CMD_FIFO_LEN_BYTES (16 * sizeof(struct update_param))
+
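+/* Read-modify-write of the bit field [start:end] of register 'idx': FLD_MOD()
+ * (from dss.h) replaces those bits of the current register value with 'val'. */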
+#define REG_FLD_MOD(idx, val, start, end) \
+	rfbi_write_reg(idx, FLD_MOD(rfbi_read_reg(idx), val, start, end))
+
+/* To work around an RFBI transfer rate limitation */
+#define OMAP_RFBI_RATE_LIMIT    1
+
+enum omap_rfbi_cycleformat {
+	OMAP_DSS_RFBI_CYCLEFORMAT_1_1 = 0,
+	OMAP_DSS_RFBI_CYCLEFORMAT_2_1 = 1,
+	OMAP_DSS_RFBI_CYCLEFORMAT_3_1 = 2,
+	OMAP_DSS_RFBI_CYCLEFORMAT_3_2 = 3,
+};
+
+enum omap_rfbi_datatype {
+	OMAP_DSS_RFBI_DATATYPE_12 = 0,
+	OMAP_DSS_RFBI_DATATYPE_16 = 1,
+	OMAP_DSS_RFBI_DATATYPE_18 = 2,
+	OMAP_DSS_RFBI_DATATYPE_24 = 3,
+};
+
+enum omap_rfbi_parallelmode {
+	OMAP_DSS_RFBI_PARALLELMODE_8 = 0,
+	OMAP_DSS_RFBI_PARALLELMODE_9 = 1,
+	OMAP_DSS_RFBI_PARALLELMODE_12 = 2,
+	OMAP_DSS_RFBI_PARALLELMODE_16 = 3,
+};
+
+enum update_cmd {
+	RFBI_CMD_UPDATE = 0,
+	RFBI_CMD_SYNC   = 1,
+};
+
+static int rfbi_convert_timings(struct rfbi_timings *t);
+static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div);
+static void process_cmd_fifo(void);
+
+static struct {
+	void __iomem	*base;
+
+	unsigned long	l4_khz;
+
+	enum omap_rfbi_datatype datatype;
+	enum omap_rfbi_parallelmode parallelmode;
+
+	enum omap_rfbi_te_mode te_mode;
+	int te_enabled;
+
+	void (*framedone_callback)(void *data);
+	void *framedone_callback_data;
+
+	struct omap_dss_device *dssdev[2];
+
+	struct kfifo      *cmd_fifo;
+	spinlock_t        cmd_lock;
+	struct completion cmd_done;
+	atomic_t          cmd_fifo_full;
+	atomic_t          cmd_pending;
+#ifdef MEASURE_PERF
+	unsigned perf_bytes;
+	ktime_t perf_setup_time;
+	ktime_t perf_start_time;
+#endif
+} rfbi;
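+/*
+ * The RFBI block is a singleton, so all driver state is kept in this one
+ * file-scope struct instead of being allocated per device.
+ */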
+
+struct update_region {
+	u16	x;
+	u16	y;
+	u16	w;
+	u16	h;
+};
+
+struct update_param {
+	u8 rfbi_module;
+	u8 cmd;
+
+	union {
+		struct update_region r;
+		struct completion *sync;
+	} par;
+};
+
+static inline void rfbi_write_reg(const struct rfbi_reg idx, u32 val)
+{
+	__raw_writel(val, rfbi.base + idx.idx);
+}
+
+static inline u32 rfbi_read_reg(const struct rfbi_reg idx)
+{
+	return __raw_readl(rfbi.base + idx.idx);
+}
+
+static void rfbi_enable_clocks(bool enable)
+{
+	if (enable)
+		dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+	else
+		dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+}
+
+void omap_rfbi_write_command(const void *buf, u32 len)
+{
+	rfbi_enable_clocks(1);
+	switch (rfbi.parallelmode) {
+	case OMAP_DSS_RFBI_PARALLELMODE_8:
+	{
+		const u8 *b = buf;
+		for (; len; len--)
+			rfbi_write_reg(RFBI_CMD, *b++);
+		break;
+	}
+
+	case OMAP_DSS_RFBI_PARALLELMODE_16:
+	{
+		const u16 *w = buf;
+		BUG_ON(len & 1);
+		for (; len; len -= 2)
+			rfbi_write_reg(RFBI_CMD, *w++);
+		break;
+	}
+
+	case OMAP_DSS_RFBI_PARALLELMODE_9:
+	case OMAP_DSS_RFBI_PARALLELMODE_12:
+	default:
+		BUG();
+	}
+	rfbi_enable_clocks(0);
+}
+EXPORT_SYMBOL(omap_rfbi_write_command);
+
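+/*
+ * Reading is a two-phase handshake: a dummy write to RFBI_READ starts a
+ * read cycle on the panel bus, and reading the register back then
+ * returns the latched data.
+ */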
+void omap_rfbi_read_data(void *buf, u32 len)
+{
+	rfbi_enable_clocks(1);
+	switch (rfbi.parallelmode) {
+	case OMAP_DSS_RFBI_PARALLELMODE_8:
+	{
+		u8 *b = buf;
+		for (; len; len--) {
+			rfbi_write_reg(RFBI_READ, 0);
+			*b++ = rfbi_read_reg(RFBI_READ);
+		}
+		break;
+	}
+
+	case OMAP_DSS_RFBI_PARALLELMODE_16:
+	{
+		u16 *w = buf;
+		/* 16-bit reads must be an even number of bytes long */
+		BUG_ON(len & 1);
+		for (; len; len -= 2) {
+			rfbi_write_reg(RFBI_READ, 0);
+			*w++ = rfbi_read_reg(RFBI_READ);
+		}
+		break;
+	}
+
+	case OMAP_DSS_RFBI_PARALLELMODE_9:
+	case OMAP_DSS_RFBI_PARALLELMODE_12:
+	default:
+		BUG();
+	}
+	rfbi_enable_clocks(0);
+}
+EXPORT_SYMBOL(omap_rfbi_read_data);
+
+void omap_rfbi_write_data(const void *buf, u32 len)
+{
+	rfbi_enable_clocks(1);
+	switch (rfbi.parallelmode) {
+	case OMAP_DSS_RFBI_PARALLELMODE_8:
+	{
+		const u8 *b = buf;
+		for (; len; len--)
+			rfbi_write_reg(RFBI_PARAM, *b++);
+		break;
+	}
+
+	case OMAP_DSS_RFBI_PARALLELMODE_16:
+	{
+		const u16 *w = buf;
+		BUG_ON(len & 1);
+		for (; len; len -= 2)
+			rfbi_write_reg(RFBI_PARAM, *w++);
+		break;
+	}
+
+	case OMAP_DSS_RFBI_PARALLELMODE_9:
+	case OMAP_DSS_RFBI_PARALLELMODE_12:
+	default:
+		BUG();
+	}
+	rfbi_enable_clocks(0);
+}
+EXPORT_SYMBOL(omap_rfbi_write_data);
+
+void omap_rfbi_write_pixels(const void __iomem *buf, int scr_width,
+		u16 x, u16 y,
+		u16 w, u16 h)
+{
+	int start_offset = scr_width * y + x;
+	int horiz_offset = scr_width - w;
+	int i;
+
+	rfbi_enable_clocks(1);
+
+	if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_16 &&
+	   rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_8) {
+		const u16 __iomem *pd = buf;
+		pd += start_offset;
+
+		for (; h; --h) {
+			for (i = 0; i < w; ++i) {
+				const u8 __iomem *b = (const u8 __iomem *)pd;
+				rfbi_write_reg(RFBI_PARAM, __raw_readb(b+1));
+				rfbi_write_reg(RFBI_PARAM, __raw_readb(b+0));
+				++pd;
+			}
+			pd += horiz_offset;
+		}
+	} else if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_24 &&
+	   rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_8) {
+		const u32 __iomem *pd = buf;
+		pd += start_offset;
+
+		for (; h; --h) {
+			for (i = 0; i < w; ++i) {
+				const u8 __iomem *b = (const u8 __iomem *)pd;
+				rfbi_write_reg(RFBI_PARAM, __raw_readb(b+2));
+				rfbi_write_reg(RFBI_PARAM, __raw_readb(b+1));
+				rfbi_write_reg(RFBI_PARAM, __raw_readb(b+0));
+				++pd;
+			}
+			pd += horiz_offset;
+		}
+	} else if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_16 &&
+	   rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_16) {
+		const u16 __iomem *pd = buf;
+		pd += start_offset;
+
+		for (; h; --h) {
+			for (i = 0; i < w; ++i) {
+				rfbi_write_reg(RFBI_PARAM, __raw_readw(pd));
+				++pd;
+			}
+			pd += horiz_offset;
+		}
+	} else {
+		BUG();
+	}
+
+	rfbi_enable_clocks(0);
+}
+EXPORT_SYMBOL(omap_rfbi_write_pixels);
+
+#ifdef MEASURE_PERF
+static void perf_mark_setup(void)
+{
+	rfbi.perf_setup_time = ktime_get();
+}
+
+static void perf_mark_start(void)
+{
+	rfbi.perf_start_time = ktime_get();
+}
+
+static void perf_show(const char *name)
+{
+	ktime_t t, setup_time, trans_time;
+	u32 total_bytes;
+	u32 setup_us, trans_us, total_us;
+
+	t = ktime_get();
+
+	setup_time = ktime_sub(rfbi.perf_start_time, rfbi.perf_setup_time);
+	setup_us = (u32)ktime_to_us(setup_time);
+	if (setup_us == 0)
+		setup_us = 1;
+
+	trans_time = ktime_sub(t, rfbi.perf_start_time);
+	trans_us = (u32)ktime_to_us(trans_time);
+	if (trans_us == 0)
+		trans_us = 1;
+
+	total_us = setup_us + trans_us;
+
+	total_bytes = rfbi.perf_bytes;
+
+	DSSINFO("%s update %u us + %u us = %u us (%uHz), %u bytes, "
+			"%u kbytes/sec\n",
+			name,
+			setup_us,
+			trans_us,
+			total_us,
+			1000*1000 / total_us,
+			total_bytes,
+			total_bytes * 1000 / total_us);
+}
+#else
+#define perf_mark_setup()
+#define perf_mark_start()
+#define perf_show(x)
+#endif
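+/*
+ * With MEASURE_PERF disabled the hooks above compile away to nothing,
+ * so the transfer paths carry no instrumentation overhead.
+ */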
+
+void rfbi_transfer_area(u16 width, u16 height,
+			     void (callback)(void *data), void *data)
+{
+	u32 l;
+
+	/*BUG_ON(callback == 0);*/
+	BUG_ON(rfbi.framedone_callback != NULL);
+
+	DSSDBG("rfbi_transfer_area %dx%d\n", width, height);
+
+	dispc_set_lcd_size(width, height);
+
+	dispc_enable_lcd_out(1);
+
+	rfbi.framedone_callback = callback;
+	rfbi.framedone_callback_data = data;
+
+	rfbi_enable_clocks(1);
+
+	rfbi_write_reg(RFBI_PIXEL_CNT, width * height);
+
+	l = rfbi_read_reg(RFBI_CONTROL);
+	l = FLD_MOD(l, 1, 0, 0); /* enable */
+	if (!rfbi.te_enabled)
+		l = FLD_MOD(l, 1, 4, 4); /* ITE */
+
+	perf_mark_start();
+
+	rfbi_write_reg(RFBI_CONTROL, l);
+}
+
+static void framedone_callback(void *data, u32 mask)
+{
+	void (*callback)(void *data);
+
+	DSSDBG("FRAMEDONE\n");
+
+	perf_show("DISPC");
+
+	REG_FLD_MOD(RFBI_CONTROL, 0, 0, 0);
+
+	rfbi_enable_clocks(0);
+
+	callback = rfbi.framedone_callback;
+	rfbi.framedone_callback = NULL;
+
+	/*callback(rfbi.framedone_callback_data);*/
+
+	atomic_set(&rfbi.cmd_pending, 0);
+
+	process_cmd_fifo();
+}
+
+#if 1 /* VERBOSE */
+static void rfbi_print_timings(void)
+{
+	u32 l;
+	u32 time;
+
+	l = rfbi_read_reg(RFBI_CONFIG(0));
+	time = 1000000000 / rfbi.l4_khz;
+	if (l & (1 << 4))
+		time *= 2;
+
+	DSSDBG("Tick time %u ps\n", time);
+	l = rfbi_read_reg(RFBI_ONOFF_TIME(0));
+	DSSDBG("CSONTIME %d, CSOFFTIME %d, WEONTIME %d, WEOFFTIME %d, "
+		"REONTIME %d, REOFFTIME %d\n",
+		l & 0x0f, (l >> 4) & 0x3f, (l >> 10) & 0x0f, (l >> 14) & 0x3f,
+		(l >> 20) & 0x0f, (l >> 24) & 0x3f);
+
+	l = rfbi_read_reg(RFBI_CYCLE_TIME(0));
+	DSSDBG("WECYCLETIME %d, RECYCLETIME %d, CSPULSEWIDTH %d, "
+		"ACCESSTIME %d\n",
+		(l & 0x3f), (l >> 6) & 0x3f, (l >> 12) & 0x3f,
+		(l >> 22) & 0x3f);
+}
+#else
+static void rfbi_print_timings(void) {}
+#endif
+
+
+static u32 extif_clk_period;
+
+static inline unsigned long round_to_extif_ticks(unsigned long ps, int div)
+{
+	int bus_tick = extif_clk_period * div;
+	return (ps + bus_tick - 1) / bus_tick * bus_tick;
+}
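+/*
+ * The helper above is the usual ((x + n - 1) / n) * n idiom: it rounds
+ * ps up to the next multiple of the bus tick, so no timing parameter is
+ * ever shortened below what was requested.
+ */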
+
+static int calc_reg_timing(struct rfbi_timings *t, int div)
+{
+	t->clk_div = div;
+
+	t->cs_on_time = round_to_extif_ticks(t->cs_on_time, div);
+
+	t->we_on_time = round_to_extif_ticks(t->we_on_time, div);
+	t->we_off_time = round_to_extif_ticks(t->we_off_time, div);
+	t->we_cycle_time = round_to_extif_ticks(t->we_cycle_time, div);
+
+	t->re_on_time = round_to_extif_ticks(t->re_on_time, div);
+	t->re_off_time = round_to_extif_ticks(t->re_off_time, div);
+	t->re_cycle_time = round_to_extif_ticks(t->re_cycle_time, div);
+
+	t->access_time = round_to_extif_ticks(t->access_time, div);
+	t->cs_off_time = round_to_extif_ticks(t->cs_off_time, div);
+	t->cs_pulse_width = round_to_extif_ticks(t->cs_pulse_width, div);
+
+	DSSDBG("[reg]cson %d csoff %d reon %d reoff %d\n",
+	       t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
+	DSSDBG("[reg]weon %d weoff %d recyc %d wecyc %d\n",
+	       t->we_on_time, t->we_off_time, t->re_cycle_time,
+	       t->we_cycle_time);
+	DSSDBG("[reg]rdaccess %d cspulse %d\n",
+	       t->access_time, t->cs_pulse_width);
+
+	return rfbi_convert_timings(t);
+}
+
+static int calc_extif_timings(struct rfbi_timings *t)
+{
+	u32 max_clk_div;
+	int div;
+
+	rfbi_get_clk_info(&extif_clk_period, &max_clk_div);
+	for (div = 1; div <= max_clk_div; div++) {
+		if (calc_reg_timing(t, div) == 0)
+			break;
+	}
+
+	if (div <= max_clk_div)
+		return 0;
+
+	DSSERR("can't setup timings\n");
+	return -1;
+}
+
+
+void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t)
+{
+	int r;
+
+	if (!t->converted) {
+		r = calc_extif_timings(t);
+		if (r < 0)
+			DSSERR("Failed to calc timings\n");
+	}
+
+	BUG_ON(!t->converted);
+
+	rfbi_enable_clocks(1);
+	rfbi_write_reg(RFBI_ONOFF_TIME(rfbi_module), t->tim[0]);
+	rfbi_write_reg(RFBI_CYCLE_TIME(rfbi_module), t->tim[1]);
+
+	/* TIMEGRANULARITY */
+	REG_FLD_MOD(RFBI_CONFIG(rfbi_module),
+		    (t->tim[2] ? 1 : 0), 4, 4);
+
+	rfbi_print_timings();
+	rfbi_enable_clocks(0);
+}
+
+static int ps_to_rfbi_ticks(int time, int div)
+{
+	unsigned long tick_ps;
+	int ret;
+
+	/* Calculate in picosecs to yield more exact results */
+	tick_ps = 1000000000 / (rfbi.l4_khz) * div;
+
+	ret = (time + tick_ps - 1) / tick_ps;
+
+	return ret;
+}
+
+#ifdef OMAP_RFBI_RATE_LIMIT
+unsigned long rfbi_get_max_tx_rate(void)
+{
+	unsigned long   l4_rate, dss1_rate;
+	int             min_l4_ticks = 0;
+	int             i;
+
+	/* According to TI this can't be calculated, so make the
+	 * adjustments for a couple of known frequencies and warn for
+	 * others.
+	 */
+	static const struct {
+		unsigned long l4_clk;           /* MHz */
+		unsigned long dss1_clk;         /* MHz */
+		unsigned long min_l4_ticks;
+	} ftab[] = {
+		{ 55,   132,    7, },           /* 7.86 MPix/s */
+		{ 110,  110,    12, },          /* 9.16 MPix/s */
+		{ 110,  132,    10, },          /* 11   Mpix/s */
+		{ 120,  120,    10, },          /* 12   Mpix/s */
+		{ 133,  133,    10, },          /* 13.3 Mpix/s */
+	};
+
+	l4_rate = rfbi.l4_khz / 1000;
+	dss1_rate = dss_clk_get_rate(DSS_CLK_FCK1) / 1000000;
+
+	for (i = 0; i < ARRAY_SIZE(ftab); i++) {
+		/* Use a window instead of an exact match, to account
+		 * for different DPLL multiplier / divider pairs.
+		 */
+		if (abs(ftab[i].l4_clk - l4_rate) < 3 &&
+		    abs(ftab[i].dss1_clk - dss1_rate) < 3) {
+			min_l4_ticks = ftab[i].min_l4_ticks;
+			break;
+		}
+	}
+	if (i == ARRAY_SIZE(ftab)) {
+		/* Can't be sure, so return the maximum without rate
+		 * limiting. This might cause a problem only for the
+		 * tearing synchronisation.
+		 */
+		DSSERR("can't determine maximum RFBI transfer rate\n");
+		return rfbi.l4_khz * 1000;
+	}
+	return rfbi.l4_khz * 1000 / min_l4_ticks;
+}
+#else
+int rfbi_get_max_tx_rate(void)
+{
+	return rfbi.l4_khz * 1000;
+}
+#endif
+
+static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div)
+{
+	*clk_period = 1000000000 / rfbi.l4_khz;
+	*max_clk_div = 2;
+}
+
+static int rfbi_convert_timings(struct rfbi_timings *t)
+{
+	u32 l;
+	int reon, reoff, weon, weoff, cson, csoff, cs_pulse;
+	int actim, recyc, wecyc;
+	int div = t->clk_div;
+
+	if (div <= 0 || div > 2)
+		return -1;
+
+	/* Make sure that after conversion it still holds that:
+	 * weoff > weon, reoff > reon, recyc >= reoff, wecyc >= weoff,
+	 * csoff > cson, csoff >= max(weoff, reoff), actim > reon
+	 */
+	weon = ps_to_rfbi_ticks(t->we_on_time, div);
+	weoff = ps_to_rfbi_ticks(t->we_off_time, div);
+	if (weoff <= weon)
+		weoff = weon + 1;
+	if (weon > 0x0f)
+		return -1;
+	if (weoff > 0x3f)
+		return -1;
+
+	reon = ps_to_rfbi_ticks(t->re_on_time, div);
+	reoff = ps_to_rfbi_ticks(t->re_off_time, div);
+	if (reoff <= reon)
+		reoff = reon + 1;
+	if (reon > 0x0f)
+		return -1;
+	if (reoff > 0x3f)
+		return -1;
+
+	cson = ps_to_rfbi_ticks(t->cs_on_time, div);
+	csoff = ps_to_rfbi_ticks(t->cs_off_time, div);
+	if (csoff <= cson)
+		csoff = cson + 1;
+	if (csoff < max(weoff, reoff))
+		csoff = max(weoff, reoff);
+	if (cson > 0x0f)
+		return -1;
+	if (csoff > 0x3f)
+		return -1;
+
+	l =  cson;
+	l |= csoff << 4;
+	l |= weon  << 10;
+	l |= weoff << 14;
+	l |= reon  << 20;
+	l |= reoff << 24;
+
+	t->tim[0] = l;
+
+	actim = ps_to_rfbi_ticks(t->access_time, div);
+	if (actim <= reon)
+		actim = reon + 1;
+	if (actim > 0x3f)
+		return -1;
+
+	wecyc = ps_to_rfbi_ticks(t->we_cycle_time, div);
+	if (wecyc < weoff)
+		wecyc = weoff;
+	if (wecyc > 0x3f)
+		return -1;
+
+	recyc = ps_to_rfbi_ticks(t->re_cycle_time, div);
+	if (recyc < reoff)
+		recyc = reoff;
+	if (recyc > 0x3f)
+		return -1;
+
+	cs_pulse = ps_to_rfbi_ticks(t->cs_pulse_width, div);
+	if (cs_pulse > 0x3f)
+		return -1;
+
+	l =  wecyc;
+	l |= recyc    << 6;
+	l |= cs_pulse << 12;
+	l |= actim    << 22;
+
+	t->tim[1] = l;
+
+	t->tim[2] = div - 1;
+
+	t->converted = 1;
+
+	return 0;
+}
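+/*
+ * On success tim[0] holds the RFBI_ONOFF_TIME register image, tim[1]
+ * the RFBI_CYCLE_TIME image, and tim[2] stores div - 1, which
+ * rfbi_set_timings() maps to the TIMEGRANULARITY bit.
+ */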
+
+/* xxx FIX module selection missing */
+int omap_rfbi_setup_te(enum omap_rfbi_te_mode mode,
+			     unsigned hs_pulse_time, unsigned vs_pulse_time,
+			     int hs_pol_inv, int vs_pol_inv, int extif_div)
+{
+	int hs, vs;
+	int min;
+	u32 l;
+
+	hs = ps_to_rfbi_ticks(hs_pulse_time, 1);
+	vs = ps_to_rfbi_ticks(vs_pulse_time, 1);
+	if (hs < 2)
+		return -EDOM;
+	if (mode == OMAP_DSS_RFBI_TE_MODE_2)
+		min = 2;
+	else /* OMAP_DSS_RFBI_TE_MODE_1 */
+		min = 4;
+	if (vs < min)
+		return -EDOM;
+	if (vs == hs)
+		return -EINVAL;
+	rfbi.te_mode = mode;
+	DSSDBG("setup_te: mode %d hs %d vs %d hs_inv %d vs_inv %d\n",
+		mode, hs, vs, hs_pol_inv, vs_pol_inv);
+
+	rfbi_enable_clocks(1);
+	rfbi_write_reg(RFBI_HSYNC_WIDTH, hs);
+	rfbi_write_reg(RFBI_VSYNC_WIDTH, vs);
+
+	l = rfbi_read_reg(RFBI_CONFIG(0));
+	if (hs_pol_inv)
+		l &= ~(1 << 21);
+	else
+		l |= 1 << 21;
+	if (vs_pol_inv)
+		l &= ~(1 << 20);
+	else
+		l |= 1 << 20;
+	/* write the updated polarity bits back */
+	rfbi_write_reg(RFBI_CONFIG(0), l);
+	rfbi_enable_clocks(0);
+
+	return 0;
+}
+EXPORT_SYMBOL(omap_rfbi_setup_te);
+
+/* xxx FIX module selection missing */
+int omap_rfbi_enable_te(bool enable, unsigned line)
+{
+	u32 l;
+
+	DSSDBG("te %d line %d mode %d\n", enable, line, rfbi.te_mode);
+	if (line > (1 << 11) - 1)
+		return -EINVAL;
+
+	rfbi_enable_clocks(1);
+	l = rfbi_read_reg(RFBI_CONFIG(0));
+	l &= ~(0x3 << 2);
+	if (enable) {
+		rfbi.te_enabled = 1;
+		l |= rfbi.te_mode << 2;
+	} else {
+		rfbi.te_enabled = 0;
+	}
+	rfbi_write_reg(RFBI_CONFIG(0), l);
+	rfbi_write_reg(RFBI_LINE_NUMBER, line);
+	rfbi_enable_clocks(0);
+
+	return 0;
+}
+EXPORT_SYMBOL(omap_rfbi_enable_te);
+
+#if 0
+static void rfbi_enable_config(int enable1, int enable2)
+{
+	u32 l;
+	int cs = 0;
+
+	if (enable1)
+		cs |= 1<<0;
+	if (enable2)
+		cs |= 1<<1;
+
+	rfbi_enable_clocks(1);
+
+	l = rfbi_read_reg(RFBI_CONTROL);
+
+	l = FLD_MOD(l, cs, 3, 2);
+	l = FLD_MOD(l, 0, 1, 1);
+
+	rfbi_write_reg(RFBI_CONTROL, l);
+
+	l = rfbi_read_reg(RFBI_CONFIG(0));
+	l = FLD_MOD(l, 0, 3, 2); /* TRIGGERMODE: ITE */
+	/*l |= FLD_VAL(2, 8, 7); */ /* L4FORMAT, 2pix/L4 */
+	/*l |= FLD_VAL(0, 8, 7); */ /* L4FORMAT, 1pix/L4 */
+
+	l = FLD_MOD(l, 0, 16, 16); /* A0POLARITY */
+	l = FLD_MOD(l, 1, 20, 20); /* TE_VSYNC_POLARITY */
+	l = FLD_MOD(l, 1, 21, 21); /* HSYNCPOLARITY */
+
+	l = FLD_MOD(l, OMAP_DSS_RFBI_PARALLELMODE_8, 1, 0);
+	rfbi_write_reg(RFBI_CONFIG(0), l);
+
+	rfbi_enable_clocks(0);
+}
+#endif
+
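+/*
+ * rfbi_configure() derives the cycle format from the ratio of pixel bits
+ * to bus lines: e.g. 24 bpp on an 8-bit bus needs three bus cycles per
+ * pixel (CYCLEFORMAT_3_1), while 24 bpp on 16 lines needs three cycles
+ * per two pixels (CYCLEFORMAT_3_2).
+ */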
+int rfbi_configure(int rfbi_module, int bpp, int lines)
+{
+	u32 l;
+	int cycle1 = 0, cycle2 = 0, cycle3 = 0;
+	enum omap_rfbi_cycleformat cycleformat;
+	enum omap_rfbi_datatype datatype;
+	enum omap_rfbi_parallelmode parallelmode;
+
+	switch (bpp) {
+	case 12:
+		datatype = OMAP_DSS_RFBI_DATATYPE_12;
+		break;
+	case 16:
+		datatype = OMAP_DSS_RFBI_DATATYPE_16;
+		break;
+	case 18:
+		datatype = OMAP_DSS_RFBI_DATATYPE_18;
+		break;
+	case 24:
+		datatype = OMAP_DSS_RFBI_DATATYPE_24;
+		break;
+	default:
+		BUG();
+		return 1;
+	}
+	rfbi.datatype = datatype;
+
+	switch (lines) {
+	case 8:
+		parallelmode = OMAP_DSS_RFBI_PARALLELMODE_8;
+		break;
+	case 9:
+		parallelmode = OMAP_DSS_RFBI_PARALLELMODE_9;
+		break;
+	case 12:
+		parallelmode = OMAP_DSS_RFBI_PARALLELMODE_12;
+		break;
+	case 16:
+		parallelmode = OMAP_DSS_RFBI_PARALLELMODE_16;
+		break;
+	default:
+		BUG();
+		return 1;
+	}
+	rfbi.parallelmode = parallelmode;
+
+	if ((bpp % lines) == 0) {
+		switch (bpp / lines) {
+		case 1:
+			cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_1_1;
+			break;
+		case 2:
+			cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_2_1;
+			break;
+		case 3:
+			cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_3_1;
+			break;
+		default:
+			BUG();
+			return 1;
+		}
+	} else if ((2 * bpp % lines) == 0) {
+		if ((2 * bpp / lines) == 3)
+			cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_3_2;
+		else {
+			BUG();
+			return 1;
+		}
+	} else {
+		BUG();
+		return 1;
+	}
+
+	switch (cycleformat) {
+	case OMAP_DSS_RFBI_CYCLEFORMAT_1_1:
+		cycle1 = lines;
+		break;
+
+	case OMAP_DSS_RFBI_CYCLEFORMAT_2_1:
+		cycle1 = lines;
+		cycle2 = lines;
+		break;
+
+	case OMAP_DSS_RFBI_CYCLEFORMAT_3_1:
+		cycle1 = lines;
+		cycle2 = lines;
+		cycle3 = lines;
+		break;
+
+	case OMAP_DSS_RFBI_CYCLEFORMAT_3_2:
+		cycle1 = lines;
+		cycle2 = (lines / 2) | ((lines / 2) << 16);
+		cycle3 = (lines << 16);
+		break;
+	}
+
+	rfbi_enable_clocks(1);
+
+	REG_FLD_MOD(RFBI_CONTROL, 0, 3, 2); /* clear CS */
+
+	l = 0;
+	l |= FLD_VAL(parallelmode, 1, 0);
+	l |= FLD_VAL(0, 3, 2);		/* TRIGGERMODE: ITE */
+	l |= FLD_VAL(0, 4, 4);		/* TIMEGRANULARITY */
+	l |= FLD_VAL(datatype, 6, 5);
+	/* l |= FLD_VAL(2, 8, 7); */	/* L4FORMAT, 2pix/L4 */
+	l |= FLD_VAL(0, 8, 7);	/* L4FORMAT, 1pix/L4 */
+	l |= FLD_VAL(cycleformat, 10, 9);
+	l |= FLD_VAL(0, 12, 11);	/* UNUSEDBITS */
+	l |= FLD_VAL(0, 16, 16);	/* A0POLARITY */
+	l |= FLD_VAL(0, 17, 17);	/* REPOLARITY */
+	l |= FLD_VAL(0, 18, 18);	/* WEPOLARITY */
+	l |= FLD_VAL(0, 19, 19);	/* CSPOLARITY */
+	l |= FLD_VAL(1, 20, 20);	/* TE_VSYNC_POLARITY */
+	l |= FLD_VAL(1, 21, 21);	/* HSYNCPOLARITY */
+	rfbi_write_reg(RFBI_CONFIG(rfbi_module), l);
+
+	rfbi_write_reg(RFBI_DATA_CYCLE1(rfbi_module), cycle1);
+	rfbi_write_reg(RFBI_DATA_CYCLE2(rfbi_module), cycle2);
+	rfbi_write_reg(RFBI_DATA_CYCLE3(rfbi_module), cycle3);
+
+	l = rfbi_read_reg(RFBI_CONTROL);
+	l = FLD_MOD(l, rfbi_module+1, 3, 2); /* Select CSx */
+	l = FLD_MOD(l, 0, 1, 1); /* clear bypass */
+	rfbi_write_reg(RFBI_CONTROL, l);
+
+	DSSDBG("RFBI config: bpp %d, lines %d, cycles: 0x%x 0x%x 0x%x\n",
+	       bpp, lines, cycle1, cycle2, cycle3);
+
+	rfbi_enable_clocks(0);
+
+	return 0;
+}
+EXPORT_SYMBOL(rfbi_configure);
+
+static int rfbi_find_display(struct omap_dss_device *dssdev)
+{
+	if (dssdev == rfbi.dssdev[0])
+		return 0;
+
+	if (dssdev == rfbi.dssdev[1])
+		return 1;
+
+	BUG();
+	return -1;
+}
+
+static void signal_fifo_waiters(void)
+{
+	if (atomic_read(&rfbi.cmd_fifo_full) > 0) {
+		/* DSSDBG("SIGNALING: Fifo not full for waiter!\n"); */
+		complete(&rfbi.cmd_done);
+		atomic_dec(&rfbi.cmd_fifo_full);
+	}
+}
+
+/* returns 1 for async op, and 0 for sync op */
+static int do_update(struct omap_dss_device *dssdev, struct update_region *upd)
+{
+	u16 x = upd->x;
+	u16 y = upd->y;
+	u16 w = upd->w;
+	u16 h = upd->h;
+
+	perf_mark_setup();
+
+	if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
+		/*dssdev->driver->enable_te(dssdev, 1); */
+		dss_setup_partial_planes(dssdev, &x, &y, &w, &h);
+	}
+
+#ifdef MEASURE_PERF
+	rfbi.perf_bytes = w * h * 2; /* XXX always 16bit */
+#endif
+
+	dssdev->driver->setup_update(dssdev, x, y, w, h);
+
+	if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
+		rfbi_transfer_area(w, h, NULL, NULL);
+		return 1;
+	} else {
+		struct omap_overlay *ovl;
+		void __iomem *addr;
+		int scr_width;
+
+		ovl = dssdev->manager->overlays[0];
+		scr_width = ovl->info.screen_width;
+		addr = ovl->info.vaddr;
+
+		omap_rfbi_write_pixels(addr, scr_width, x, y, w, h);
+
+		perf_show("L4");
+
+		return 0;
+	}
+}
+
+static void process_cmd_fifo(void)
+{
+	int len;
+	struct update_param p;
+	struct omap_dss_device *dssdev;
+	unsigned long flags;
+
+	if (atomic_inc_return(&rfbi.cmd_pending) != 1)
+		return;
+
+	while (true) {
+		spin_lock_irqsave(rfbi.cmd_fifo->lock, flags);
+
+		len = __kfifo_get(rfbi.cmd_fifo, (unsigned char *)&p,
+				  sizeof(struct update_param));
+		if (len == 0) {
+			DSSDBG("nothing more in fifo\n");
+			atomic_set(&rfbi.cmd_pending, 0);
+			spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags);
+			break;
+		}
+
+		/* DSSDBG("fifo full %d\n", rfbi.cmd_fifo_full.counter);*/
+
+		spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags);
+
+		BUG_ON(len != sizeof(struct update_param));
+		BUG_ON(p.rfbi_module > 1);
+
+		dssdev = rfbi.dssdev[p.rfbi_module];
+
+		if (p.cmd == RFBI_CMD_UPDATE) {
+			if (do_update(dssdev, &p.par.r))
+				break; /* async op */
+		} else if (p.cmd == RFBI_CMD_SYNC) {
+			DSSDBG("Signaling SYNC done!\n");
+			complete(p.par.sync);
+		} else
+			BUG();
+	}
+
+	signal_fifo_waiters();
+}
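+/*
+ * The atomic_inc_return() guard makes process_cmd_fifo() safe to call
+ * from several contexts: only the caller that moves cmd_pending from 0
+ * to 1 drains the fifo. An async update leaves cmd_pending at 1 until
+ * framedone_callback() clears it and restarts the drain.
+ */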
+
+static void rfbi_push_cmd(struct update_param *p)
+{
+	int ret;
+
+	while (1) {
+		unsigned long flags;
+		int available;
+
+		spin_lock_irqsave(rfbi.cmd_fifo->lock, flags);
+		available = RFBI_CMD_FIFO_LEN_BYTES -
+			__kfifo_len(rfbi.cmd_fifo);
+
+/*		DSSDBG("%d bytes left in fifo\n", available); */
+		if (available < sizeof(struct update_param)) {
+			DSSDBG("Going to wait because FIFO FULL..\n");
+			spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags);
+			atomic_inc(&rfbi.cmd_fifo_full);
+			wait_for_completion(&rfbi.cmd_done);
+			/*DSSDBG("Woke up because fifo not full anymore\n");*/
+			continue;
+		}
+
+		ret = __kfifo_put(rfbi.cmd_fifo, (unsigned char *)p,
+				  sizeof(struct update_param));
+/*		DSSDBG("pushed %d bytes\n", ret);*/
+
+		spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags);
+
+		BUG_ON(ret != sizeof(struct update_param));
+
+		break;
+	}
+}
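+/*
+ * A producer that finds the fifo full blocks on cmd_done;
+ * signal_fifo_waiters() wakes one waiter after each drain pass.
+ */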
+
+static void rfbi_push_update(int rfbi_module, int x, int y, int w, int h)
+{
+	struct update_param p;
+
+	p.rfbi_module = rfbi_module;
+	p.cmd = RFBI_CMD_UPDATE;
+
+	p.par.r.x = x;
+	p.par.r.y = y;
+	p.par.r.w = w;
+	p.par.r.h = h;
+
+	DSSDBG("RFBI pushed %d,%d %dx%d\n", x, y, w, h);
+
+	rfbi_push_cmd(&p);
+
+	process_cmd_fifo();
+}
+
+static void rfbi_push_sync(int rfbi_module, struct completion *sync_comp)
+{
+	struct update_param p;
+
+	p.rfbi_module = rfbi_module;
+	p.cmd = RFBI_CMD_SYNC;
+	p.par.sync = sync_comp;
+
+	rfbi_push_cmd(&p);
+
+	DSSDBG("RFBI sync pushed to cmd fifo\n");
+
+	process_cmd_fifo();
+}
+
+void rfbi_dump_regs(struct seq_file *s)
+{
+#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r))
+
+	dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+	DUMPREG(RFBI_REVISION);
+	DUMPREG(RFBI_SYSCONFIG);
+	DUMPREG(RFBI_SYSSTATUS);
+	DUMPREG(RFBI_CONTROL);
+	DUMPREG(RFBI_PIXEL_CNT);
+	DUMPREG(RFBI_LINE_NUMBER);
+	DUMPREG(RFBI_CMD);
+	DUMPREG(RFBI_PARAM);
+	DUMPREG(RFBI_DATA);
+	DUMPREG(RFBI_READ);
+	DUMPREG(RFBI_STATUS);
+
+	DUMPREG(RFBI_CONFIG(0));
+	DUMPREG(RFBI_ONOFF_TIME(0));
+	DUMPREG(RFBI_CYCLE_TIME(0));
+	DUMPREG(RFBI_DATA_CYCLE1(0));
+	DUMPREG(RFBI_DATA_CYCLE2(0));
+	DUMPREG(RFBI_DATA_CYCLE3(0));
+
+	DUMPREG(RFBI_CONFIG(1));
+	DUMPREG(RFBI_ONOFF_TIME(1));
+	DUMPREG(RFBI_CYCLE_TIME(1));
+	DUMPREG(RFBI_DATA_CYCLE1(1));
+	DUMPREG(RFBI_DATA_CYCLE2(1));
+	DUMPREG(RFBI_DATA_CYCLE3(1));
+
+	DUMPREG(RFBI_VSYNC_WIDTH);
+	DUMPREG(RFBI_HSYNC_WIDTH);
+
+	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+#undef DUMPREG
+}
+
+int rfbi_init(void)
+{
+	u32 rev;
+	u32 l;
+
+	spin_lock_init(&rfbi.cmd_lock);
+	rfbi.cmd_fifo = kfifo_alloc(RFBI_CMD_FIFO_LEN_BYTES, GFP_KERNEL,
+				    &rfbi.cmd_lock);
+	if (IS_ERR(rfbi.cmd_fifo))
+		return PTR_ERR(rfbi.cmd_fifo);
+
+	init_completion(&rfbi.cmd_done);
+	atomic_set(&rfbi.cmd_fifo_full, 0);
+	atomic_set(&rfbi.cmd_pending, 0);
+
+	rfbi.base = ioremap(RFBI_BASE, SZ_256);
+	if (!rfbi.base) {
+		DSSERR("can't ioremap RFBI\n");
+		kfifo_free(rfbi.cmd_fifo);
+		return -ENOMEM;
+	}
+
+	rfbi_enable_clocks(1);
+
+	msleep(10);
+
+	rfbi.l4_khz = dss_clk_get_rate(DSS_CLK_ICK) / 1000;
+
+	/* Enable autoidle (bit 0) and smart-idle (SIDLEMODE = 0x2) */
+	l = rfbi_read_reg(RFBI_SYSCONFIG);
+	l |= (1 << 0) | (2 << 3);
+	rfbi_write_reg(RFBI_SYSCONFIG, l);
+
+	rev = rfbi_read_reg(RFBI_REVISION);
+	printk(KERN_INFO "OMAP RFBI rev %d.%d\n",
+	       FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+
+	rfbi_enable_clocks(0);
+
+	return 0;
+}
+
+void rfbi_exit(void)
+{
+	DSSDBG("rfbi_exit\n");
+
+	kfifo_free(rfbi.cmd_fifo);
+
+	iounmap(rfbi.base);
+}
+
+/* struct omap_display support */
+static int rfbi_display_update(struct omap_dss_device *dssdev,
+			u16 x, u16 y, u16 w, u16 h)
+{
+	int rfbi_module;
+
+	if (w == 0 || h == 0)
+		return 0;
+
+	rfbi_module = rfbi_find_display(dssdev);
+
+	rfbi_push_update(rfbi_module, x, y, w, h);
+
+	return 0;
+}
+
+static int rfbi_display_sync(struct omap_dss_device *dssdev)
+{
+	struct completion sync_comp;
+	int rfbi_module;
+
+	rfbi_module = rfbi_find_display(dssdev);
+
+	init_completion(&sync_comp);
+	rfbi_push_sync(rfbi_module, &sync_comp);
+	DSSDBG("Waiting for SYNC to happen...\n");
+	wait_for_completion(&sync_comp);
+	DSSDBG("Released from SYNC\n");
+	return 0;
+}
+
+static int rfbi_display_enable_te(struct omap_dss_device *dssdev, bool enable)
+{
+	dssdev->driver->enable_te(dssdev, enable);
+	return 0;
+}
+
+static int rfbi_display_enable(struct omap_dss_device *dssdev)
+{
+	int r;
+
+	r = omap_dss_start_device(dssdev);
+	if (r) {
+		DSSERR("failed to start device\n");
+		goto err0;
+	}
+
+	r = omap_dispc_register_isr(framedone_callback, NULL,
+			DISPC_IRQ_FRAMEDONE);
+	if (r) {
+		DSSERR("can't get FRAMEDONE irq\n");
+		goto err1;
+	}
+
+	dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT);
+
+	dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_RFBI);
+
+	dispc_set_tft_data_lines(dssdev->ctrl.pixel_size);
+
+	rfbi_configure(dssdev->phy.rfbi.channel,
+			       dssdev->ctrl.pixel_size,
+			       dssdev->phy.rfbi.data_lines);
+
+	rfbi_set_timings(dssdev->phy.rfbi.channel,
+			 &dssdev->ctrl.rfbi_timings);
+
+	if (dssdev->driver->enable) {
+		r = dssdev->driver->enable(dssdev);
+		if (r)
+			goto err2;
+	}
+
+	return 0;
+err2:
+	omap_dispc_unregister_isr(framedone_callback, NULL,
+			DISPC_IRQ_FRAMEDONE);
+err1:
+	omap_dss_stop_device(dssdev);
+err0:
+	return r;
+}
+
+static void rfbi_display_disable(struct omap_dss_device *dssdev)
+{
+	dssdev->driver->disable(dssdev);
+	omap_dispc_unregister_isr(framedone_callback, NULL,
+			DISPC_IRQ_FRAMEDONE);
+	omap_dss_stop_device(dssdev);
+}
+
+int rfbi_init_display(struct omap_dss_device *dssdev)
+{
+	dssdev->enable = rfbi_display_enable;
+	dssdev->disable = rfbi_display_disable;
+	dssdev->update = rfbi_display_update;
+	dssdev->sync = rfbi_display_sync;
+	dssdev->enable_te = rfbi_display_enable_te;
+
+	rfbi.dssdev[dssdev->phy.rfbi.channel] = dssdev;
+
+	dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
+
+	return 0;
+}
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
new file mode 100644
index 0000000..c24f307
--- /dev/null
+++ b/drivers/video/omap2/dss/sdi.c
@@ -0,0 +1,277 @@
+/*
+ * linux/drivers/video/omap2/dss/sdi.c
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "SDI"
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+
+#include <plat/display.h>
+#include "dss.h"
+
+static struct {
+	bool skip_init;
+	bool update_enabled;
+} sdi;
+
+static void sdi_basic_init(void)
+{
+	dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_BYPASS);
+
+	dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT);
+	dispc_set_tft_data_lines(24);
+	dispc_lcd_enable_signal_polarity(1);
+}
+
+static int sdi_display_enable(struct omap_dss_device *dssdev)
+{
+	struct omap_video_timings *t = &dssdev->panel.timings;
+	struct dss_clock_info dss_cinfo;
+	struct dispc_clock_info dispc_cinfo;
+	u16 lck_div, pck_div;
+	unsigned long fck;
+	unsigned long pck;
+	int r;
+
+	r = omap_dss_start_device(dssdev);
+	if (r) {
+		DSSERR("failed to start device\n");
+		goto err0;
+	}
+
+	if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
+		DSSERR("dssdev already enabled\n");
+		r = -EINVAL;
+		goto err1;
+	}
+
+	/* In case of skip_init sdi_init has already enabled the clocks */
+	if (!sdi.skip_init)
+		dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+	sdi_basic_init();
+
+	/* 15.5.9.1.2 */
+	dssdev->panel.config |= OMAP_DSS_LCD_RF | OMAP_DSS_LCD_ONOFF;
+
+	dispc_set_pol_freq(dssdev->panel.config, dssdev->panel.acbi,
+			dssdev->panel.acb);
+
+	if (!sdi.skip_init) {
+		r = dss_calc_clock_div(1, t->pixel_clock * 1000,
+				&dss_cinfo, &dispc_cinfo);
+	} else {
+		r = dss_get_clock_div(&dss_cinfo);
+		if (!r)
+			r = dispc_get_clock_div(&dispc_cinfo);
+	}
+
+	if (r)
+		goto err2;
+
+	fck = dss_cinfo.fck;
+	lck_div = dispc_cinfo.lck_div;
+	pck_div = dispc_cinfo.pck_div;
+
+	pck = fck / lck_div / pck_div / 1000;	/* kHz */
+
+	if (pck != t->pixel_clock) {
+		DSSWARN("Could not find exact pixel clock. Requested %d kHz, "
+				"got %lu kHz\n",
+				t->pixel_clock, pck);
+
+		t->pixel_clock = pck;
+	}
+
+	dispc_set_lcd_timings(t);
+
+	r = dss_set_clock_div(&dss_cinfo);
+	if (r)
+		goto err2;
+
+	r = dispc_set_clock_div(&dispc_cinfo);
+	if (r)
+		goto err2;
+
+	if (!sdi.skip_init) {
+		dss_sdi_init(dssdev->phy.sdi.datapairs);
+		r = dss_sdi_enable();
+		if (r)
+			goto err2;
+		mdelay(2);
+	}
+
+	dispc_enable_lcd_out(1);
+
+	if (dssdev->driver->enable) {
+		r = dssdev->driver->enable(dssdev);
+		if (r)
+			goto err3;
+	}
+
+	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+	sdi.skip_init = 0;
+
+	return 0;
+err3:
+	dispc_enable_lcd_out(0);
+err2:
+	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+err1:
+	omap_dss_stop_device(dssdev);
+err0:
+	return r;
+}
+
+static int sdi_display_resume(struct omap_dss_device *dssdev);
+
+static void sdi_display_disable(struct omap_dss_device *dssdev)
+{
+	if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED)
+		return;
+
+	if (dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
+		if (sdi_display_resume(dssdev))
+			return;
+
+	if (dssdev->driver->disable)
+		dssdev->driver->disable(dssdev);
+
+	dispc_enable_lcd_out(0);
+
+	dss_sdi_disable();
+
+	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+	dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+
+	omap_dss_stop_device(dssdev);
+}
+
+static int sdi_display_suspend(struct omap_dss_device *dssdev)
+{
+	if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+		return -EINVAL;
+
+	if (dssdev->driver->suspend)
+		dssdev->driver->suspend(dssdev);
+
+	dispc_enable_lcd_out(0);
+
+	dss_sdi_disable();
+
+	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+	dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+
+	return 0;
+}
+
+static int sdi_display_resume(struct omap_dss_device *dssdev)
+{
+	int r;
+
+	if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED)
+		return -EINVAL;
+
+	dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+	r = dss_sdi_enable();
+	if (r)
+		goto err;
+	mdelay(2);
+
+	dispc_enable_lcd_out(1);
+
+	if (dssdev->driver->resume)
+		dssdev->driver->resume(dssdev);
+
+	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+	return 0;
+err:
+	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+	return r;
+}
+
+static int sdi_display_set_update_mode(struct omap_dss_device *dssdev,
+		enum omap_dss_update_mode mode)
+{
+	if (mode == OMAP_DSS_UPDATE_MANUAL)
+		return -EINVAL;
+
+	if (mode == OMAP_DSS_UPDATE_DISABLED) {
+		dispc_enable_lcd_out(0);
+		sdi.update_enabled = 0;
+	} else {
+		dispc_enable_lcd_out(1);
+		sdi.update_enabled = 1;
+	}
+
+	return 0;
+}
+
+static enum omap_dss_update_mode sdi_display_get_update_mode(
+		struct omap_dss_device *dssdev)
+{
+	return sdi.update_enabled ? OMAP_DSS_UPDATE_AUTO :
+		OMAP_DSS_UPDATE_DISABLED;
+}
+
+static void sdi_get_timings(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings)
+{
+	*timings = dssdev->panel.timings;
+}
+
+int sdi_init_display(struct omap_dss_device *dssdev)
+{
+	DSSDBG("SDI init\n");
+
+	dssdev->enable = sdi_display_enable;
+	dssdev->disable = sdi_display_disable;
+	dssdev->suspend = sdi_display_suspend;
+	dssdev->resume = sdi_display_resume;
+	dssdev->set_update_mode = sdi_display_set_update_mode;
+	dssdev->get_update_mode = sdi_display_get_update_mode;
+	dssdev->get_timings = sdi_get_timings;
+
+	return 0;
+}
+
+int sdi_init(bool skip_init)
+{
+	/* we store this for first display enable, then clear it */
+	sdi.skip_init = skip_init;
+
+	/*
+	 * Enable the clocks here already; otherwise they would toggle
+	 * on and off until sdi_display_enable() is called.
+	 */
+	if (skip_init)
+		dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+	return 0;
+}
+
+void sdi_exit(void)
+{
+}
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
new file mode 100644
index 0000000..749a5a0
--- /dev/null
+++ b/drivers/video/omap2/dss/venc.c
@@ -0,0 +1,797 @@
+/*
+ * linux/drivers/video/omap2/dss/venc.c
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * VENC settings from TI's DSS driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "VENC"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/seq_file.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <plat/display.h>
+#include <plat/cpu.h>
+
+#include "dss.h"
+
+#define VENC_BASE	0x48050C00
+
+/* Venc registers */
+#define VENC_REV_ID				0x00
+#define VENC_STATUS				0x04
+#define VENC_F_CONTROL				0x08
+#define VENC_VIDOUT_CTRL			0x10
+#define VENC_SYNC_CTRL				0x14
+#define VENC_LLEN				0x1C
+#define VENC_FLENS				0x20
+#define VENC_HFLTR_CTRL				0x24
+#define VENC_CC_CARR_WSS_CARR			0x28
+#define VENC_C_PHASE				0x2C
+#define VENC_GAIN_U				0x30
+#define VENC_GAIN_V				0x34
+#define VENC_GAIN_Y				0x38
+#define VENC_BLACK_LEVEL			0x3C
+#define VENC_BLANK_LEVEL			0x40
+#define VENC_X_COLOR				0x44
+#define VENC_M_CONTROL				0x48
+#define VENC_BSTAMP_WSS_DATA			0x4C
+#define VENC_S_CARR				0x50
+#define VENC_LINE21				0x54
+#define VENC_LN_SEL				0x58
+#define VENC_L21__WC_CTL			0x5C
+#define VENC_HTRIGGER_VTRIGGER			0x60
+#define VENC_SAVID__EAVID			0x64
+#define VENC_FLEN__FAL				0x68
+#define VENC_LAL__PHASE_RESET			0x6C
+#define VENC_HS_INT_START_STOP_X		0x70
+#define VENC_HS_EXT_START_STOP_X		0x74
+#define VENC_VS_INT_START_X			0x78
+#define VENC_VS_INT_STOP_X__VS_INT_START_Y	0x7C
+#define VENC_VS_INT_STOP_Y__VS_EXT_START_X	0x80
+#define VENC_VS_EXT_STOP_X__VS_EXT_START_Y	0x84
+#define VENC_VS_EXT_STOP_Y			0x88
+#define VENC_AVID_START_STOP_X			0x90
+#define VENC_AVID_START_STOP_Y			0x94
+#define VENC_FID_INT_START_X__FID_INT_START_Y	0xA0
+#define VENC_FID_INT_OFFSET_Y__FID_EXT_START_X	0xA4
+#define VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y	0xA8
+#define VENC_TVDETGP_INT_START_STOP_X		0xB0
+#define VENC_TVDETGP_INT_START_STOP_Y		0xB4
+#define VENC_GEN_CTRL				0xB8
+#define VENC_OUTPUT_CONTROL			0xC4
+#define VENC_OUTPUT_TEST			0xC8
+#define VENC_DAC_B__DAC_C			0xC8
+
+struct venc_config {
+	u32 f_control;
+	u32 vidout_ctrl;
+	u32 sync_ctrl;
+	u32 llen;
+	u32 flens;
+	u32 hfltr_ctrl;
+	u32 cc_carr_wss_carr;
+	u32 c_phase;
+	u32 gain_u;
+	u32 gain_v;
+	u32 gain_y;
+	u32 black_level;
+	u32 blank_level;
+	u32 x_color;
+	u32 m_control;
+	u32 bstamp_wss_data;
+	u32 s_carr;
+	u32 line21;
+	u32 ln_sel;
+	u32 l21__wc_ctl;
+	u32 htrigger_vtrigger;
+	u32 savid__eavid;
+	u32 flen__fal;
+	u32 lal__phase_reset;
+	u32 hs_int_start_stop_x;
+	u32 hs_ext_start_stop_x;
+	u32 vs_int_start_x;
+	u32 vs_int_stop_x__vs_int_start_y;
+	u32 vs_int_stop_y__vs_ext_start_x;
+	u32 vs_ext_stop_x__vs_ext_start_y;
+	u32 vs_ext_stop_y;
+	u32 avid_start_stop_x;
+	u32 avid_start_stop_y;
+	u32 fid_int_start_x__fid_int_start_y;
+	u32 fid_int_offset_y__fid_ext_start_x;
+	u32 fid_ext_start_y__fid_ext_offset_y;
+	u32 tvdetgp_int_start_stop_x;
+	u32 tvdetgp_int_start_stop_y;
+	u32 gen_ctrl;
+};
+
+/* from TRM */
+static const struct venc_config venc_config_pal_trm = {
+	.f_control				= 0,
+	.vidout_ctrl				= 1,
+	.sync_ctrl				= 0x40,
+	.llen					= 0x35F, /* 863 */
+	.flens					= 0x270, /* 624 */
+	.hfltr_ctrl				= 0,
+	.cc_carr_wss_carr			= 0x2F7225ED,
+	.c_phase				= 0,
+	.gain_u					= 0x111,
+	.gain_v					= 0x181,
+	.gain_y					= 0x140,
+	.black_level				= 0x3B,
+	.blank_level				= 0x3B,
+	.x_color				= 0x7,
+	.m_control				= 0x2,
+	.bstamp_wss_data			= 0x3F,
+	.s_carr					= 0x2A098ACB,
+	.line21					= 0,
+	.ln_sel					= 0x01290015,
+	.l21__wc_ctl				= 0x0000F603,
+	.htrigger_vtrigger			= 0,
+
+	.savid__eavid				= 0x06A70108,
+	.flen__fal				= 0x00180270,
+	.lal__phase_reset			= 0x00040135,
+	.hs_int_start_stop_x			= 0x00880358,
+	.hs_ext_start_stop_x			= 0x000F035F,
+	.vs_int_start_x				= 0x01A70000,
+	.vs_int_stop_x__vs_int_start_y		= 0x000001A7,
+	.vs_int_stop_y__vs_ext_start_x		= 0x01AF0000,
+	.vs_ext_stop_x__vs_ext_start_y		= 0x000101AF,
+	.vs_ext_stop_y				= 0x00000025,
+	.avid_start_stop_x			= 0x03530083,
+	.avid_start_stop_y			= 0x026C002E,
+	.fid_int_start_x__fid_int_start_y	= 0x0001008A,
+	.fid_int_offset_y__fid_ext_start_x	= 0x002E0138,
+	.fid_ext_start_y__fid_ext_offset_y	= 0x01380001,
+
+	.tvdetgp_int_start_stop_x		= 0x00140001,
+	.tvdetgp_int_start_stop_y		= 0x00010001,
+	.gen_ctrl				= 0x00FF0000,
+};
+
+/* from TRM */
+static const struct venc_config venc_config_ntsc_trm = {
+	.f_control				= 0,
+	.vidout_ctrl				= 1,
+	.sync_ctrl				= 0x8040,
+	.llen					= 0x359,
+	.flens					= 0x20C,
+	.hfltr_ctrl				= 0,
+	.cc_carr_wss_carr			= 0x043F2631,
+	.c_phase				= 0,
+	.gain_u					= 0x102,
+	.gain_v					= 0x16C,
+	.gain_y					= 0x12F,
+	.black_level				= 0x43,
+	.blank_level				= 0x38,
+	.x_color				= 0x7,
+	.m_control				= 0x1,
+	.bstamp_wss_data			= 0x38,
+	.s_carr					= 0x21F07C1F,
+	.line21					= 0,
+	.ln_sel					= 0x01310011,
+	.l21__wc_ctl				= 0x0000F003,
+	.htrigger_vtrigger			= 0,
+
+	.savid__eavid				= 0x069300F4,
+	.flen__fal				= 0x0016020C,
+	.lal__phase_reset			= 0x00060107,
+	.hs_int_start_stop_x			= 0x008E0350,
+	.hs_ext_start_stop_x			= 0x000F0359,
+	.vs_int_start_x				= 0x01A00000,
+	.vs_int_stop_x__vs_int_start_y		= 0x020701A0,
+	.vs_int_stop_y__vs_ext_start_x		= 0x01AC0024,
+	.vs_ext_stop_x__vs_ext_start_y		= 0x020D01AC,
+	.vs_ext_stop_y				= 0x00000006,
+	.avid_start_stop_x			= 0x03480078,
+	.avid_start_stop_y			= 0x02060024,
+	.fid_int_start_x__fid_int_start_y	= 0x0001008A,
+	.fid_int_offset_y__fid_ext_start_x	= 0x01AC0106,
+	.fid_ext_start_y__fid_ext_offset_y	= 0x01060006,
+
+	.tvdetgp_int_start_stop_x		= 0x00140001,
+	.tvdetgp_int_start_stop_y		= 0x00010001,
+	.gen_ctrl				= 0x00F90000,
+};
+
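+/* Alternative PAL B/D/G/H/I settings; not referenced in this file and
+ * kept for documentation. */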
+static const struct venc_config venc_config_pal_bdghi = {
+	.f_control				= 0,
+	.vidout_ctrl				= 0,
+	.sync_ctrl				= 0,
+	.hfltr_ctrl				= 0,
+	.x_color				= 0,
+	.line21					= 0,
+	.ln_sel					= 21,
+	.htrigger_vtrigger			= 0,
+	.tvdetgp_int_start_stop_x		= 0x00140001,
+	.tvdetgp_int_start_stop_y		= 0x00010001,
+	.gen_ctrl				= 0x00FB0000,
+
+	.llen					= 864-1,
+	.flens					= 625-1,
+	.cc_carr_wss_carr			= 0x2F7625ED,
+	.c_phase				= 0xDF,
+	.gain_u					= 0x111,
+	.gain_v					= 0x181,
+	.gain_y					= 0x140,
+	.black_level				= 0x3e,
+	.blank_level				= 0x3e,
+	.m_control				= 0<<2 | 1<<1,
+	.bstamp_wss_data			= 0x42,
+	.s_carr					= 0x2a098acb,
+	.l21__wc_ctl				= 0<<13 | 0x16<<8 | 0<<0,
+	.savid__eavid				= 0x06A70108,
+	.flen__fal				= 23<<16 | 624<<0,
+	.lal__phase_reset			= 2<<17 | 310<<0,
+	.hs_int_start_stop_x			= 0x00920358,
+	.hs_ext_start_stop_x			= 0x000F035F,
+	.vs_int_start_x				= 0x1a7<<16,
+	.vs_int_stop_x__vs_int_start_y		= 0x000601A7,
+	.vs_int_stop_y__vs_ext_start_x		= 0x01AF0036,
+	.vs_ext_stop_x__vs_ext_start_y		= 0x27101af,
+	.vs_ext_stop_y				= 0x05,
+	.avid_start_stop_x			= 0x03530082,
+	.avid_start_stop_y			= 0x0270002E,
+	.fid_int_start_x__fid_int_start_y	= 0x0005008A,
+	.fid_int_offset_y__fid_ext_start_x	= 0x002E0138,
+	.fid_ext_start_y__fid_ext_offset_y	= 0x01380005,
+};
+
+const struct omap_video_timings omap_dss_pal_timings = {
+	.x_res		= 720,
+	.y_res		= 574,
+	.pixel_clock	= 13500,
+	.hsw		= 64,
+	.hfp		= 12,
+	.hbp		= 68,
+	.vsw		= 5,
+	.vfp		= 5,
+	.vbp		= 41,
+};
+EXPORT_SYMBOL(omap_dss_pal_timings);
+
+const struct omap_video_timings omap_dss_ntsc_timings = {
+	.x_res		= 720,
+	.y_res		= 482,
+	.pixel_clock	= 13500,
+	.hsw		= 64,
+	.hfp		= 16,
+	.hbp		= 58,
+	.vsw		= 6,
+	.vfp		= 6,
+	.vbp		= 31,
+};
+EXPORT_SYMBOL(omap_dss_ntsc_timings);
+
+static struct {
+	void __iomem *base;
+	struct mutex venc_lock;
+	u32 wss_data;
+	struct regulator *vdda_dac_reg;
+} venc;
+
+static inline void venc_write_reg(int idx, u32 val)
+{
+	__raw_writel(val, venc.base + idx);
+}
+
+static inline u32 venc_read_reg(int idx)
+{
+	u32 l = __raw_readl(venc.base + idx);
+	return l;
+}
+
+static void venc_write_config(const struct venc_config *config)
+{
+	DSSDBG("write venc conf\n");
+
+	venc_write_reg(VENC_LLEN, config->llen);
+	venc_write_reg(VENC_FLENS, config->flens);
+	venc_write_reg(VENC_CC_CARR_WSS_CARR, config->cc_carr_wss_carr);
+	venc_write_reg(VENC_C_PHASE, config->c_phase);
+	venc_write_reg(VENC_GAIN_U, config->gain_u);
+	venc_write_reg(VENC_GAIN_V, config->gain_v);
+	venc_write_reg(VENC_GAIN_Y, config->gain_y);
+	venc_write_reg(VENC_BLACK_LEVEL, config->black_level);
+	venc_write_reg(VENC_BLANK_LEVEL, config->blank_level);
+	venc_write_reg(VENC_M_CONTROL, config->m_control);
+	venc_write_reg(VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data |
+			venc.wss_data);
+	venc_write_reg(VENC_S_CARR, config->s_carr);
+	venc_write_reg(VENC_L21__WC_CTL, config->l21__wc_ctl);
+	venc_write_reg(VENC_SAVID__EAVID, config->savid__eavid);
+	venc_write_reg(VENC_FLEN__FAL, config->flen__fal);
+	venc_write_reg(VENC_LAL__PHASE_RESET, config->lal__phase_reset);
+	venc_write_reg(VENC_HS_INT_START_STOP_X, config->hs_int_start_stop_x);
+	venc_write_reg(VENC_HS_EXT_START_STOP_X, config->hs_ext_start_stop_x);
+	venc_write_reg(VENC_VS_INT_START_X, config->vs_int_start_x);
+	venc_write_reg(VENC_VS_INT_STOP_X__VS_INT_START_Y,
+		       config->vs_int_stop_x__vs_int_start_y);
+	venc_write_reg(VENC_VS_INT_STOP_Y__VS_EXT_START_X,
+		       config->vs_int_stop_y__vs_ext_start_x);
+	venc_write_reg(VENC_VS_EXT_STOP_X__VS_EXT_START_Y,
+		       config->vs_ext_stop_x__vs_ext_start_y);
+	venc_write_reg(VENC_VS_EXT_STOP_Y, config->vs_ext_stop_y);
+	venc_write_reg(VENC_AVID_START_STOP_X, config->avid_start_stop_x);
+	venc_write_reg(VENC_AVID_START_STOP_Y, config->avid_start_stop_y);
+	venc_write_reg(VENC_FID_INT_START_X__FID_INT_START_Y,
+		       config->fid_int_start_x__fid_int_start_y);
+	venc_write_reg(VENC_FID_INT_OFFSET_Y__FID_EXT_START_X,
+		       config->fid_int_offset_y__fid_ext_start_x);
+	venc_write_reg(VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y,
+		       config->fid_ext_start_y__fid_ext_offset_y);
+
+	venc_write_reg(VENC_DAC_B__DAC_C,  venc_read_reg(VENC_DAC_B__DAC_C));
+	venc_write_reg(VENC_VIDOUT_CTRL, config->vidout_ctrl);
+	venc_write_reg(VENC_HFLTR_CTRL, config->hfltr_ctrl);
+	venc_write_reg(VENC_X_COLOR, config->x_color);
+	venc_write_reg(VENC_LINE21, config->line21);
+	venc_write_reg(VENC_LN_SEL, config->ln_sel);
+	venc_write_reg(VENC_HTRIGGER_VTRIGGER, config->htrigger_vtrigger);
+	venc_write_reg(VENC_TVDETGP_INT_START_STOP_X,
+		       config->tvdetgp_int_start_stop_x);
+	venc_write_reg(VENC_TVDETGP_INT_START_STOP_Y,
+		       config->tvdetgp_int_start_stop_y);
+	venc_write_reg(VENC_GEN_CTRL, config->gen_ctrl);
+	venc_write_reg(VENC_F_CONTROL, config->f_control);
+	venc_write_reg(VENC_SYNC_CTRL, config->sync_ctrl);
+}
+
+static void venc_reset(void)
+{
+	int t = 1000;
+
+	venc_write_reg(VENC_F_CONTROL, 1<<8);
+	while (venc_read_reg(VENC_F_CONTROL) & (1<<8)) {
+		if (--t == 0) {
+			DSSERR("Failed to reset venc\n");
+			return;
+		}
+	}
+
+	/* the magical sleep that makes things work */
+	msleep(20);
+}
+
+static void venc_enable_clocks(int enable)
+{
+	if (enable)
+		dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_54M |
+				DSS_CLK_96M);
+	else
+		dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_54M |
+				DSS_CLK_96M);
+}
+
+static const struct venc_config *venc_timings_to_config(
+		struct omap_video_timings *timings)
+{
+	if (memcmp(&omap_dss_pal_timings, timings, sizeof(*timings)) == 0)
+		return &venc_config_pal_trm;
+
+	if (memcmp(&omap_dss_ntsc_timings, timings, sizeof(*timings)) == 0)
+		return &venc_config_ntsc_trm;
+
+	BUG();
+}
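+/*
+ * venc_check_timings() only accepts the PAL and NTSC tables above, so
+ * for callers that validate their timings the BUG() here is unreachable.
+ */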
+
+/* driver */
+static int venc_panel_probe(struct omap_dss_device *dssdev)
+{
+	dssdev->panel.timings = omap_dss_pal_timings;
+
+	return 0;
+}
+
+static void venc_panel_remove(struct omap_dss_device *dssdev)
+{
+}
+
+static int venc_panel_enable(struct omap_dss_device *dssdev)
+{
+	int r = 0;
+
+	/* wait a couple of vsyncs before enabling the LCD */
+	msleep(50);
+
+	if (dssdev->platform_enable)
+		r = dssdev->platform_enable(dssdev);
+
+	return r;
+}
+
+static void venc_panel_disable(struct omap_dss_device *dssdev)
+{
+	if (dssdev->platform_disable)
+		dssdev->platform_disable(dssdev);
+
+	/* wait at least 5 vsyncs after disabling the LCD */
+	msleep(100);
+}
+
+static int venc_panel_suspend(struct omap_dss_device *dssdev)
+{
+	venc_panel_disable(dssdev);
+	return 0;
+}
+
+static int venc_panel_resume(struct omap_dss_device *dssdev)
+{
+	return venc_panel_enable(dssdev);
+}
+
+static struct omap_dss_driver venc_driver = {
+	.probe		= venc_panel_probe,
+	.remove		= venc_panel_remove,
+
+	.enable		= venc_panel_enable,
+	.disable	= venc_panel_disable,
+	.suspend	= venc_panel_suspend,
+	.resume		= venc_panel_resume,
+
+	.driver         = {
+		.name   = "venc",
+		.owner  = THIS_MODULE,
+	},
+};
+/* driver end */
+
+int venc_init(struct platform_device *pdev)
+{
+	u8 rev_id;
+
+	mutex_init(&venc.venc_lock);
+
+	venc.wss_data = 0;
+
+	venc.base = ioremap(VENC_BASE, SZ_1K);
+	if (!venc.base) {
+		DSSERR("can't ioremap VENC\n");
+		return -ENOMEM;
+	}
+
+	venc.vdda_dac_reg = regulator_get(&pdev->dev, "vdda_dac");
+	if (IS_ERR(venc.vdda_dac_reg)) {
+		iounmap(venc.base);
+		DSSERR("can't get VDDA_DAC regulator\n");
+		return PTR_ERR(venc.vdda_dac_reg);
+	}
+
+	venc_enable_clocks(1);
+
+	rev_id = (u8)(venc_read_reg(VENC_REV_ID) & 0xff);
+	printk(KERN_INFO "OMAP VENC rev %d\n", rev_id);
+
+	venc_enable_clocks(0);
+
+	return omap_dss_register_driver(&venc_driver);
+}
+
+void venc_exit(void)
+{
+	omap_dss_unregister_driver(&venc_driver);
+
+	regulator_put(venc.vdda_dac_reg);
+
+	iounmap(venc.base);
+}
+
+static void venc_power_on(struct omap_dss_device *dssdev)
+{
+	u32 l;
+
+	venc_enable_clocks(1);
+
+	venc_reset();
+	venc_write_config(venc_timings_to_config(&dssdev->panel.timings));
+
+	dss_set_venc_output(dssdev->phy.venc.type);
+	dss_set_dac_pwrdn_bgz(1);
+
+	l = 0;
+
+	if (dssdev->phy.venc.type == OMAP_DSS_VENC_TYPE_COMPOSITE)
+		l |= 1 << 1;
+	else /* S-Video */
+		l |= (1 << 0) | (1 << 2);
+
+	if (!dssdev->phy.venc.invert_polarity)
+		l |= 1 << 3;
+
+	venc_write_reg(VENC_OUTPUT_CONTROL, l);
+
+	dispc_set_digit_size(dssdev->panel.timings.x_res,
+			dssdev->panel.timings.y_res/2);
+
+	regulator_enable(venc.vdda_dac_reg);
+
+	if (dssdev->platform_enable)
+		dssdev->platform_enable(dssdev);
+
+	dispc_enable_digit_out(1);
+}
+
+static void venc_power_off(struct omap_dss_device *dssdev)
+{
+	venc_write_reg(VENC_OUTPUT_CONTROL, 0);
+	dss_set_dac_pwrdn_bgz(0);
+
+	dispc_enable_digit_out(0);
+
+	if (dssdev->platform_disable)
+		dssdev->platform_disable(dssdev);
+
+	regulator_disable(venc.vdda_dac_reg);
+
+	venc_enable_clocks(0);
+}
+
+static int venc_enable_display(struct omap_dss_device *dssdev)
+{
+	int r = 0;
+
+	DSSDBG("venc_enable_display\n");
+
+	mutex_lock(&venc.venc_lock);
+
+	if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
+		r = -EINVAL;
+		goto err;
+	}
+
+	venc_power_on(dssdev);
+
+	venc.wss_data = 0;
+
+	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+err:
+	mutex_unlock(&venc.venc_lock);
+
+	return r;
+}
+
+static void venc_disable_display(struct omap_dss_device *dssdev)
+{
+	DSSDBG("venc_disable_display\n");
+
+	mutex_lock(&venc.venc_lock);
+
+	if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED)
+		goto end;
+
+	if (dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED) {
+		/* suspended is the same as disabled with venc */
+		dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+		goto end;
+	}
+
+	venc_power_off(dssdev);
+
+	dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+end:
+	mutex_unlock(&venc.venc_lock);
+}
+
+static int venc_display_suspend(struct omap_dss_device *dssdev)
+{
+	int r = 0;
+
+	DSSDBG("venc_display_suspend\n");
+
+	mutex_lock(&venc.venc_lock);
+
+	if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
+		r = -EINVAL;
+		goto err;
+	}
+
+	venc_power_off(dssdev);
+
+	dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+err:
+	mutex_unlock(&venc.venc_lock);
+
+	return r;
+}
+
+static int venc_display_resume(struct omap_dss_device *dssdev)
+{
+	int r = 0;
+
+	DSSDBG("venc_display_resume\n");
+
+	mutex_lock(&venc.venc_lock);
+
+	if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
+		r = -EINVAL;
+		goto err;
+	}
+
+	venc_power_on(dssdev);
+
+	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+err:
+	mutex_unlock(&venc.venc_lock);
+
+	return r;
+}
+
+static void venc_get_timings(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings)
+{
+	*timings = dssdev->panel.timings;
+}
+
+static void venc_set_timings(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings)
+{
+	DSSDBG("venc_set_timings\n");
+
+	/* Reset WSS data when the TV standard changes. */
+	if (memcmp(&dssdev->panel.timings, timings, sizeof(*timings)))
+		venc.wss_data = 0;
+
+	dssdev->panel.timings = *timings;
+	if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+		/* turn the venc off and on to get new timings to use */
+		venc_disable_display(dssdev);
+		venc_enable_display(dssdev);
+	}
+}
+
+static int venc_check_timings(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings)
+{
+	DSSDBG("venc_check_timings\n");
+
+	if (memcmp(&omap_dss_pal_timings, timings, sizeof(*timings)) == 0)
+		return 0;
+
+	if (memcmp(&omap_dss_ntsc_timings, timings, sizeof(*timings)) == 0)
+		return 0;
+
+	return -EINVAL;
+}
+
+static u32 venc_get_wss(struct omap_dss_device *dssdev)
+{
+	/* Invert due to VENC_L21_WC_CTL:INV=1 */
+	return (venc.wss_data >> 8) ^ 0xfffff;
+}
+
+static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss)
+{
+	const struct venc_config *config;
+
+	DSSDBG("venc_set_wss\n");
+
+	mutex_lock(&venc.venc_lock);
+
+	config = venc_timings_to_config(&dssdev->panel.timings);
+
+	/* Invert due to VENC_L21_WC_CTL:INV=1 */
+	venc.wss_data = (wss ^ 0xfffff) << 8;
+
+	venc_enable_clocks(1);
+
+	venc_write_reg(VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data |
+			venc.wss_data);
+
+	venc_enable_clocks(0);
+
+	mutex_unlock(&venc.venc_lock);
+
+	return 0;
+}
+
+static enum omap_dss_update_mode venc_display_get_update_mode(
+		struct omap_dss_device *dssdev)
+{
+	if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+		return OMAP_DSS_UPDATE_AUTO;
+	else
+		return OMAP_DSS_UPDATE_DISABLED;
+}
+
+int venc_init_display(struct omap_dss_device *dssdev)
+{
+	DSSDBG("init_display\n");
+
+	dssdev->enable = venc_enable_display;
+	dssdev->disable = venc_disable_display;
+	dssdev->suspend = venc_display_suspend;
+	dssdev->resume = venc_display_resume;
+	dssdev->get_timings = venc_get_timings;
+	dssdev->set_timings = venc_set_timings;
+	dssdev->check_timings = venc_check_timings;
+	dssdev->get_wss = venc_get_wss;
+	dssdev->set_wss = venc_set_wss;
+	dssdev->get_update_mode = venc_display_get_update_mode;
+
+	return 0;
+}
+
+void venc_dump_regs(struct seq_file *s)
+{
+#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r))
+
+	venc_enable_clocks(1);
+
+	DUMPREG(VENC_F_CONTROL);
+	DUMPREG(VENC_VIDOUT_CTRL);
+	DUMPREG(VENC_SYNC_CTRL);
+	DUMPREG(VENC_LLEN);
+	DUMPREG(VENC_FLENS);
+	DUMPREG(VENC_HFLTR_CTRL);
+	DUMPREG(VENC_CC_CARR_WSS_CARR);
+	DUMPREG(VENC_C_PHASE);
+	DUMPREG(VENC_GAIN_U);
+	DUMPREG(VENC_GAIN_V);
+	DUMPREG(VENC_GAIN_Y);
+	DUMPREG(VENC_BLACK_LEVEL);
+	DUMPREG(VENC_BLANK_LEVEL);
+	DUMPREG(VENC_X_COLOR);
+	DUMPREG(VENC_M_CONTROL);
+	DUMPREG(VENC_BSTAMP_WSS_DATA);
+	DUMPREG(VENC_S_CARR);
+	DUMPREG(VENC_LINE21);
+	DUMPREG(VENC_LN_SEL);
+	DUMPREG(VENC_L21__WC_CTL);
+	DUMPREG(VENC_HTRIGGER_VTRIGGER);
+	DUMPREG(VENC_SAVID__EAVID);
+	DUMPREG(VENC_FLEN__FAL);
+	DUMPREG(VENC_LAL__PHASE_RESET);
+	DUMPREG(VENC_HS_INT_START_STOP_X);
+	DUMPREG(VENC_HS_EXT_START_STOP_X);
+	DUMPREG(VENC_VS_INT_START_X);
+	DUMPREG(VENC_VS_INT_STOP_X__VS_INT_START_Y);
+	DUMPREG(VENC_VS_INT_STOP_Y__VS_EXT_START_X);
+	DUMPREG(VENC_VS_EXT_STOP_X__VS_EXT_START_Y);
+	DUMPREG(VENC_VS_EXT_STOP_Y);
+	DUMPREG(VENC_AVID_START_STOP_X);
+	DUMPREG(VENC_AVID_START_STOP_Y);
+	DUMPREG(VENC_FID_INT_START_X__FID_INT_START_Y);
+	DUMPREG(VENC_FID_INT_OFFSET_Y__FID_EXT_START_X);
+	DUMPREG(VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y);
+	DUMPREG(VENC_TVDETGP_INT_START_STOP_X);
+	DUMPREG(VENC_TVDETGP_INT_START_STOP_Y);
+	DUMPREG(VENC_GEN_CTRL);
+	DUMPREG(VENC_OUTPUT_CONTROL);
+	DUMPREG(VENC_OUTPUT_TEST);
+
+	venc_enable_clocks(0);
+
+#undef DUMPREG
+}
diff --git a/drivers/video/omap2/omapfb/Kconfig b/drivers/video/omap2/omapfb/Kconfig
new file mode 100644
index 0000000..bb694cc
--- /dev/null
+++ b/drivers/video/omap2/omapfb/Kconfig
@@ -0,0 +1,37 @@
+menuconfig FB_OMAP2
+	tristate "OMAP2/3 frame buffer support (EXPERIMENTAL)"
+	depends on FB && OMAP2_DSS
+	select OMAP2_VRAM
+	select OMAP2_VRFB
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	help
+	  Frame buffer driver for OMAP2/3 based boards.
+
+config FB_OMAP2_DEBUG_SUPPORT
+	bool "Debug support for OMAP2/3 FB"
+	default y
+	depends on FB_OMAP2
+	help
+	  Support for debug output. You have to enable the actual printing
+	  with debug module parameter.
+
+config FB_OMAP2_FORCE_AUTO_UPDATE
+	bool "Force main display to automatic update mode"
+	depends on FB_OMAP2
+	help
+	  Forces main display to automatic update mode (if possible),
+	  and also enables tearsync (if possible). By default
+	  displays that support manual update are started in manual
+	  update mode.
+
+config FB_OMAP2_NUM_FBS
+	int "Number of framebuffers"
+	range 1 10
+	default 3
+	depends on FB_OMAP2
+	help
+	  Select the number of framebuffers created. OMAP2/3 has 3 overlays
+	  so normally this would be 3.
diff --git a/drivers/video/omap2/omapfb/Makefile b/drivers/video/omap2/omapfb/Makefile
new file mode 100644
index 0000000..51c2e00
--- /dev/null
+++ b/drivers/video/omap2/omapfb/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_FB_OMAP2) += omapfb.o
+omapfb-y := omapfb-main.o omapfb-sysfs.o omapfb-ioctl.o
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
new file mode 100644
index 0000000..4c4bafd
--- /dev/null
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -0,0 +1,755 @@
+/*
+ * linux/drivers/video/omap2/omapfb/omapfb-ioctl.c
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/fb.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/mm.h>
+#include <linux/omapfb.h>
+#include <linux/vmalloc.h>
+
+#include <plat/display.h>
+#include <plat/vrfb.h>
+#include <plat/vram.h>
+
+#include "omapfb.h"
+
+static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
+{
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	struct omapfb2_device *fbdev = ofbi->fbdev;
+	struct omap_overlay *ovl;
+	struct omap_overlay_info info;
+	int r = 0;
+
+	DBG("omapfb_setup_plane\n");
+
+	if (ofbi->num_overlays != 1) {
+		r = -EINVAL;
+		goto out;
+	}
+
+	/* XXX uses only the first overlay */
+	ovl = ofbi->overlays[0];
+
+	if (pi->enabled && !ofbi->region.size) {
+		/*
+		 * This plane's memory was freed, can't enable it
+		 * until it's reallocated.
+		 */
+		r = -EINVAL;
+		goto out;
+	}
+
+	ovl->get_overlay_info(ovl, &info);
+
+	info.pos_x = pi->pos_x;
+	info.pos_y = pi->pos_y;
+	info.out_width = pi->out_width;
+	info.out_height = pi->out_height;
+	info.enabled = pi->enabled;
+
+	r = ovl->set_overlay_info(ovl, &info);
+	if (r)
+		goto out;
+
+	if (ovl->manager) {
+		r = ovl->manager->apply(ovl->manager);
+		if (r)
+			goto out;
+	}
+
+out:
+	if (r)
+		dev_err(fbdev->dev, "setup_plane failed\n");
+	return r;
+}
+
+static int omapfb_query_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
+{
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+
+	if (ofbi->num_overlays != 1) {
+		memset(pi, 0, sizeof(*pi));
+	} else {
+		struct omap_overlay_info *ovli;
+		struct omap_overlay *ovl;
+
+		ovl = ofbi->overlays[0];
+		ovli = &ovl->info;
+
+		pi->pos_x = ovli->pos_x;
+		pi->pos_y = ovli->pos_y;
+		pi->enabled = ovli->enabled;
+		pi->channel_out = 0; /* xxx */
+		pi->mirror = 0;
+		pi->out_width = ovli->out_width;
+		pi->out_height = ovli->out_height;
+	}
+
+	return 0;
+}
+
+static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
+{
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	struct omapfb2_device *fbdev = ofbi->fbdev;
+	struct omapfb2_mem_region *rg;
+	int r, i;
+	size_t size;
+
+	if (mi->type > OMAPFB_MEMTYPE_MAX)
+		return -EINVAL;
+
+	size = PAGE_ALIGN(mi->size);
+
+	rg = &ofbi->region;
+
+	for (i = 0; i < ofbi->num_overlays; i++) {
+		if (ofbi->overlays[i]->info.enabled)
+			return -EBUSY;
+	}
+
+	if (rg->size != size || rg->type != mi->type) {
+		r = omapfb_realloc_fbmem(fbi, size, mi->type);
+		if (r) {
+			dev_err(fbdev->dev, "realloc fbmem failed\n");
+			return r;
+		}
+	}
+
+	return 0;
+}
+
+static int omapfb_query_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
+{
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	struct omapfb2_mem_region *rg;
+
+	rg = &ofbi->region;
+	memset(mi, 0, sizeof(*mi));
+
+	mi->size = rg->size;
+	mi->type = rg->type;
+
+	return 0;
+}
+
+static int omapfb_update_window_nolock(struct fb_info *fbi,
+		u32 x, u32 y, u32 w, u32 h)
+{
+	struct omap_dss_device *display = fb2display(fbi);
+	u16 dw, dh;
+
+	if (!display)
+		return 0;
+
+	if (w == 0 || h == 0)
+		return 0;
+
+	display->get_resolution(display, &dw, &dh);
+
+	if (x + w > dw || y + h > dh)
+		return -EINVAL;
+
+	return display->update(display, x, y, w, h);
+}
+
+/* This function is exported for SGX driver use */
+int omapfb_update_window(struct fb_info *fbi,
+		u32 x, u32 y, u32 w, u32 h)
+{
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	struct omapfb2_device *fbdev = ofbi->fbdev;
+	int r;
+
+	omapfb_lock(fbdev);
+	lock_fb_info(fbi);
+
+	r = omapfb_update_window_nolock(fbi, x, y, w, h);
+
+	unlock_fb_info(fbi);
+	omapfb_unlock(fbdev);
+
+	return r;
+}
+EXPORT_SYMBOL(omapfb_update_window);
+
+static int omapfb_set_update_mode(struct fb_info *fbi,
+				   enum omapfb_update_mode mode)
+{
+	struct omap_dss_device *display = fb2display(fbi);
+	enum omap_dss_update_mode um;
+	int r;
+
+	if (!display || !display->set_update_mode)
+		return -EINVAL;
+
+	switch (mode) {
+	case OMAPFB_UPDATE_DISABLED:
+		um = OMAP_DSS_UPDATE_DISABLED;
+		break;
+
+	case OMAPFB_AUTO_UPDATE:
+		um = OMAP_DSS_UPDATE_AUTO;
+		break;
+
+	case OMAPFB_MANUAL_UPDATE:
+		um = OMAP_DSS_UPDATE_MANUAL;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	r = display->set_update_mode(display, um);
+
+	return r;
+}
+
+static int omapfb_get_update_mode(struct fb_info *fbi,
+		enum omapfb_update_mode *mode)
+{
+	struct omap_dss_device *display = fb2display(fbi);
+	enum omap_dss_update_mode m;
+
+	if (!display || !display->get_update_mode)
+		return -EINVAL;
+
+	m = display->get_update_mode(display);
+
+	switch (m) {
+	case OMAP_DSS_UPDATE_DISABLED:
+		*mode = OMAPFB_UPDATE_DISABLED;
+		break;
+	case OMAP_DSS_UPDATE_AUTO:
+		*mode = OMAPFB_AUTO_UPDATE;
+		break;
+	case OMAP_DSS_UPDATE_MANUAL:
+		*mode = OMAPFB_MANUAL_UPDATE;
+		break;
+	default:
+		BUG();
+	}
+
+	return 0;
+}
+
+/* XXX this color key handling is a hack... */
+static struct omapfb_color_key omapfb_color_keys[2];
+
+static int _omapfb_set_color_key(struct omap_overlay_manager *mgr,
+		struct omapfb_color_key *ck)
+{
+	struct omap_overlay_manager_info info;
+	enum omap_dss_trans_key_type kt;
+	int r;
+
+	mgr->get_manager_info(mgr, &info);
+
+	if (ck->key_type == OMAPFB_COLOR_KEY_DISABLED) {
+		info.trans_enabled = false;
+		omapfb_color_keys[mgr->id] = *ck;
+
+		r = mgr->set_manager_info(mgr, &info);
+		if (r)
+			return r;
+
+		r = mgr->apply(mgr);
+
+		return r;
+	}
+
+	switch (ck->key_type) {
+	case OMAPFB_COLOR_KEY_GFX_DST:
+		kt = OMAP_DSS_COLOR_KEY_GFX_DST;
+		break;
+	case OMAPFB_COLOR_KEY_VID_SRC:
+		kt = OMAP_DSS_COLOR_KEY_VID_SRC;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	info.default_color = ck->background;
+	info.trans_key = ck->trans_key;
+	info.trans_key_type = kt;
+	info.trans_enabled = true;
+
+	omapfb_color_keys[mgr->id] = *ck;
+
+	r = mgr->set_manager_info(mgr, &info);
+	if (r)
+		return r;
+
+	r = mgr->apply(mgr);
+
+	return r;
+}
+
+static int omapfb_set_color_key(struct fb_info *fbi,
+		struct omapfb_color_key *ck)
+{
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	struct omapfb2_device *fbdev = ofbi->fbdev;
+	int r;
+	int i;
+	struct omap_overlay_manager *mgr = NULL;
+
+	omapfb_lock(fbdev);
+
+	for (i = 0; i < ofbi->num_overlays; i++) {
+		if (ofbi->overlays[i]->manager) {
+			mgr = ofbi->overlays[i]->manager;
+			break;
+		}
+	}
+
+	if (!mgr) {
+		r = -EINVAL;
+		goto err;
+	}
+
+	r = _omapfb_set_color_key(mgr, ck);
+err:
+	omapfb_unlock(fbdev);
+
+	return r;
+}
+
+static int omapfb_get_color_key(struct fb_info *fbi,
+		struct omapfb_color_key *ck)
+{
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	struct omapfb2_device *fbdev = ofbi->fbdev;
+	struct omap_overlay_manager *mgr = NULL;
+	int r = 0;
+	int i;
+
+	omapfb_lock(fbdev);
+
+	for (i = 0; i < ofbi->num_overlays; i++) {
+		if (ofbi->overlays[i]->manager) {
+			mgr = ofbi->overlays[i]->manager;
+			break;
+		}
+	}
+
+	if (!mgr) {
+		r = -EINVAL;
+		goto err;
+	}
+
+	*ck = omapfb_color_keys[mgr->id];
+err:
+	omapfb_unlock(fbdev);
+
+	return r;
+}
+
+static int omapfb_memory_read(struct fb_info *fbi,
+		struct omapfb_memory_read *mr)
+{
+	struct omap_dss_device *display = fb2display(fbi);
+	void *buf;
+	int r;
+
+	if (!display || !display->memory_read)
+		return -ENOENT;
+
+	if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size))
+		return -EFAULT;
+
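+	/* the size check below assumes memory_read returns 3 bytes/pixel */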
+	if (mr->w * mr->h * 3 > mr->buffer_size)
+		return -EINVAL;
+
+	buf = vmalloc(mr->buffer_size);
+	if (!buf) {
+		DBG("vmalloc failed\n");
+		return -ENOMEM;
+	}
+
+	r = display->memory_read(display, buf, mr->buffer_size,
+			mr->x, mr->y, mr->w, mr->h);
+
+	if (r > 0) {
+		if (copy_to_user(mr->buffer, buf, mr->buffer_size))
+			r = -EFAULT;
+	}
+
+	vfree(buf);
+
+	return r;
+}
+
+static int omapfb_get_ovl_colormode(struct omapfb2_device *fbdev,
+			     struct omapfb_ovl_colormode *mode)
+{
+	int ovl_idx = mode->overlay_idx;
+	int mode_idx = mode->mode_idx;
+	struct omap_overlay *ovl;
+	enum omap_color_mode supported_modes;
+	struct fb_var_screeninfo var;
+	int i;
+
+	if (ovl_idx >= fbdev->num_overlays)
+		return -ENODEV;
+	ovl = fbdev->overlays[ovl_idx];
+	supported_modes = ovl->supported_modes;
+
+	mode_idx = mode->mode_idx;
+
+	for (i = 0; i < sizeof(supported_modes) * 8; i++) {
+		if (!(supported_modes & (1 << i)))
+			continue;
+		/*
+		 * It's possible that the FB doesn't support a mode
+		 * that is supported by the overlay, so only count
+		 * modes that can be expressed as an fb mode.
+		 */
+		if (dss_mode_to_fb_mode(1 << i, &var) < 0)
+			continue;
+
+		mode_idx--;
+		if (mode_idx < 0)
+			break;
+	}
+
+	if (i == sizeof(supported_modes) * 8)
+		return -ENOENT;
+
+	mode->bits_per_pixel = var.bits_per_pixel;
+	mode->nonstd = var.nonstd;
+	mode->red = var.red;
+	mode->green = var.green;
+	mode->blue = var.blue;
+	mode->transp = var.transp;
+
+	return 0;
+}
+
+static int omapfb_wait_for_go(struct fb_info *fbi)
+{
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	int r = 0;
+	int i;
+
+	for (i = 0; i < ofbi->num_overlays; ++i) {
+		struct omap_overlay *ovl = ofbi->overlays[i];
+		r = ovl->wait_for_go(ovl);
+		if (r)
+			break;
+	}
+
+	return r;
+}
+
+int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
+{
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	struct omapfb2_device *fbdev = ofbi->fbdev;
+	struct omap_dss_device *display = fb2display(fbi);
+
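+	/* scratch copy of the ioctl argument, shared by all cases below */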
+	union {
+		struct omapfb_update_window_old	uwnd_o;
+		struct omapfb_update_window	uwnd;
+		struct omapfb_plane_info	plane_info;
+		struct omapfb_caps		caps;
+		struct omapfb_mem_info          mem_info;
+		struct omapfb_color_key		color_key;
+		struct omapfb_ovl_colormode	ovl_colormode;
+		enum omapfb_update_mode		update_mode;
+		int test_num;
+		struct omapfb_memory_read	memory_read;
+		struct omapfb_vram_info		vram_info;
+		struct omapfb_tearsync_info	tearsync_info;
+	} p;
+
+	int r = 0;
+
+	switch (cmd) {
+	case OMAPFB_SYNC_GFX:
+		DBG("ioctl SYNC_GFX\n");
+		if (!display || !display->sync) {
+			/* DSS1 never returns an error here, so neither do we */
+			/*r = -EINVAL;*/
+			break;
+		}
+
+		r = display->sync(display);
+		break;
+
+	case OMAPFB_UPDATE_WINDOW_OLD:
+		DBG("ioctl UPDATE_WINDOW_OLD\n");
+		if (!display || !display->update) {
+			r = -EINVAL;
+			break;
+		}
+
+		if (copy_from_user(&p.uwnd_o,
+					(void __user *)arg,
+					sizeof(p.uwnd_o))) {
+			r = -EFAULT;
+			break;
+		}
+
+		r = omapfb_update_window_nolock(fbi, p.uwnd_o.x, p.uwnd_o.y,
+				p.uwnd_o.width, p.uwnd_o.height);
+		break;
+
+	case OMAPFB_UPDATE_WINDOW:
+		DBG("ioctl UPDATE_WINDOW\n");
+		if (!display || !display->update) {
+			r = -EINVAL;
+			break;
+		}
+
+		if (copy_from_user(&p.uwnd, (void __user *)arg,
+					sizeof(p.uwnd))) {
+			r = -EFAULT;
+			break;
+		}
+
+		r = omapfb_update_window_nolock(fbi, p.uwnd.x, p.uwnd.y,
+				p.uwnd.width, p.uwnd.height);
+		break;
+
+	case OMAPFB_SETUP_PLANE:
+		DBG("ioctl SETUP_PLANE\n");
+		if (copy_from_user(&p.plane_info, (void __user *)arg,
+					sizeof(p.plane_info)))
+			r = -EFAULT;
+		else
+			r = omapfb_setup_plane(fbi, &p.plane_info);
+		break;
+
+	case OMAPFB_QUERY_PLANE:
+		DBG("ioctl QUERY_PLANE\n");
+		r = omapfb_query_plane(fbi, &p.plane_info);
+		if (r < 0)
+			break;
+		if (copy_to_user((void __user *)arg, &p.plane_info,
+					sizeof(p.plane_info)))
+			r = -EFAULT;
+		break;
+
+	case OMAPFB_SETUP_MEM:
+		DBG("ioctl SETUP_MEM\n");
+		if (copy_from_user(&p.mem_info, (void __user *)arg,
+					sizeof(p.mem_info)))
+			r = -EFAULT;
+		else
+			r = omapfb_setup_mem(fbi, &p.mem_info);
+		break;
+
+	case OMAPFB_QUERY_MEM:
+		DBG("ioctl QUERY_MEM\n");
+		r = omapfb_query_mem(fbi, &p.mem_info);
+		if (r < 0)
+			break;
+		if (copy_to_user((void __user *)arg, &p.mem_info,
+					sizeof(p.mem_info)))
+			r = -EFAULT;
+		break;
+
+	case OMAPFB_GET_CAPS:
+		DBG("ioctl GET_CAPS\n");
+		if (!display) {
+			r = -EINVAL;
+			break;
+		}
+
+		memset(&p.caps, 0, sizeof(p.caps));
+		if (display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE)
+			p.caps.ctrl |= OMAPFB_CAPS_MANUAL_UPDATE;
+		if (display->caps & OMAP_DSS_DISPLAY_CAP_TEAR_ELIM)
+			p.caps.ctrl |= OMAPFB_CAPS_TEARSYNC;
+
+		if (copy_to_user((void __user *)arg, &p.caps, sizeof(p.caps)))
+			r = -EFAULT;
+		break;
+
+	case OMAPFB_GET_OVERLAY_COLORMODE:
+		DBG("ioctl GET_OVERLAY_COLORMODE\n");
+		if (copy_from_user(&p.ovl_colormode, (void __user *)arg,
+				   sizeof(p.ovl_colormode))) {
+			r = -EFAULT;
+			break;
+		}
+		r = omapfb_get_ovl_colormode(fbdev, &p.ovl_colormode);
+		if (r < 0)
+			break;
+		if (copy_to_user((void __user *)arg, &p.ovl_colormode,
+				 sizeof(p.ovl_colormode)))
+			r = -EFAULT;
+		break;
+
+	case OMAPFB_SET_UPDATE_MODE:
+		DBG("ioctl SET_UPDATE_MODE\n");
+		if (get_user(p.update_mode, (int __user *)arg))
+			r = -EFAULT;
+		else
+			r = omapfb_set_update_mode(fbi, p.update_mode);
+		break;
+
+	case OMAPFB_GET_UPDATE_MODE:
+		DBG("ioctl GET_UPDATE_MODE\n");
+		r = omapfb_get_update_mode(fbi, &p.update_mode);
+		if (r)
+			break;
+		if (put_user(p.update_mode,
+					(enum omapfb_update_mode __user *)arg))
+			r = -EFAULT;
+		break;
+
+	case OMAPFB_SET_COLOR_KEY:
+		DBG("ioctl SET_COLOR_KEY\n");
+		if (copy_from_user(&p.color_key, (void __user *)arg,
+				   sizeof(p.color_key)))
+			r = -EFAULT;
+		else
+			r = omapfb_set_color_key(fbi, &p.color_key);
+		break;
+
+	case OMAPFB_GET_COLOR_KEY:
+		DBG("ioctl GET_COLOR_KEY\n");
+		r = omapfb_get_color_key(fbi, &p.color_key);
+		if (r)
+			break;
+		if (copy_to_user((void __user *)arg, &p.color_key,
+				 sizeof(p.color_key)))
+			r = -EFAULT;
+		break;
+
+	case OMAPFB_WAITFORVSYNC:
+		DBG("ioctl WAITFORVSYNC\n");
+		if (!display) {
+			r = -EINVAL;
+			break;
+		}
+
+		r = display->wait_vsync(display);
+		break;
+
+	case OMAPFB_WAITFORGO:
+		DBG("ioctl WAITFORGO\n");
+		if (!display) {
+			r = -EINVAL;
+			break;
+		}
+
+		r = omapfb_wait_for_go(fbi);
+		break;
+
+	/* LCD and CTRL tests do the same thing for backward
+	 * compatibility */
+	case OMAPFB_LCD_TEST:
+		DBG("ioctl LCD_TEST\n");
+		if (get_user(p.test_num, (int __user *)arg)) {
+			r = -EFAULT;
+			break;
+		}
+		if (!display || !display->run_test) {
+			r = -EINVAL;
+			break;
+		}
+
+		r = display->run_test(display, p.test_num);
+
+		break;
+
+	case OMAPFB_CTRL_TEST:
+		DBG("ioctl CTRL_TEST\n");
+		if (get_user(p.test_num, (int __user *)arg)) {
+			r = -EFAULT;
+			break;
+		}
+		if (!display || !display->run_test) {
+			r = -EINVAL;
+			break;
+		}
+
+		r = display->run_test(display, p.test_num);
+
+		break;
+
+	case OMAPFB_MEMORY_READ:
+		DBG("ioctl MEMORY_READ\n");
+
+		if (copy_from_user(&p.memory_read, (void __user *)arg,
+					sizeof(p.memory_read))) {
+			r = -EFAULT;
+			break;
+		}
+
+		r = omapfb_memory_read(fbi, &p.memory_read);
+
+		break;
+
+	case OMAPFB_GET_VRAM_INFO: {
+		unsigned long vram, free, largest;
+
+		DBG("ioctl GET_VRAM_INFO\n");
+
+		omap_vram_get_info(&vram, &free, &largest);
+		p.vram_info.total = vram;
+		p.vram_info.free = free;
+		p.vram_info.largest_free_block = largest;
+
+		if (copy_to_user((void __user *)arg, &p.vram_info,
+					sizeof(p.vram_info)))
+			r = -EFAULT;
+		break;
+	}
+
+	case OMAPFB_SET_TEARSYNC: {
+		DBG("ioctl SET_TEARSYNC\n");
+
+		if (copy_from_user(&p.tearsync_info, (void __user *)arg,
+					sizeof(p.tearsync_info))) {
+			r = -EFAULT;
+			break;
+		}
+
+		if (!display || !display->enable_te) {
+			r = -ENODEV;
+			break;
+		}
+
+		r = display->enable_te(display, !!p.tearsync_info.enabled);
+
+		break;
+	}
+
+	default:
+		dev_err(fbdev->dev, "Unknown ioctl 0x%x\n", cmd);
+		r = -EINVAL;
+	}
+
+	if (r < 0)
+		DBG("ioctl failed: %d\n", r);
+
+	return r;
+}
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
new file mode 100644
index 0000000..ef29983
--- /dev/null
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -0,0 +1,2261 @@
+/*
+ * linux/drivers/video/omap2/omapfb/omapfb-main.c
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/omapfb.h>
+
+#include <plat/display.h>
+#include <plat/vram.h>
+#include <plat/vrfb.h>
+
+#include "omapfb.h"
+
+#define MODULE_NAME     "omapfb"
+
+#define OMAPFB_PLANE_XRES_MIN		8
+#define OMAPFB_PLANE_YRES_MIN		8
+
+static char *def_mode;
+static char *def_vram;
+static int def_vrfb;
+static int def_rotate;
+static int def_mirror;
+
+#ifdef DEBUG
+unsigned int omapfb_debug;
+module_param_named(debug, omapfb_debug, bool, 0644);
+static unsigned int omapfb_test_pattern;
+module_param_named(test, omapfb_test_pattern, bool, 0644);
+#endif
+
+static int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi);
+
+#ifdef DEBUG
+static void draw_pixel(struct fb_info *fbi, int x, int y, unsigned color)
+{
+	struct fb_var_screeninfo *var = &fbi->var;
+	struct fb_fix_screeninfo *fix = &fbi->fix;
+	void __iomem *addr = fbi->screen_base;
+	const unsigned bytespp = var->bits_per_pixel >> 3;
+	const unsigned line_len = fix->line_length / bytespp;
+
+	int r = (color >> 16) & 0xff;
+	int g = (color >> 8) & 0xff;
+	int b = (color >> 0) & 0xff;
+
+	if (var->bits_per_pixel == 16) {
+		u16 __iomem *p = (u16 __iomem *)addr;
+		p += y * line_len + x;
+
+		r = r * 32 / 256;
+		g = g * 64 / 256;
+		b = b * 32 / 256;
+
+		__raw_writew((r << 11) | (g << 5) | (b << 0), p);
+	} else if (var->bits_per_pixel == 24) {
+		u8 __iomem *p = (u8 __iomem *)addr;
+		p += (y * line_len + x) * 3;
+
+		__raw_writeb(b, p + 0);
+		__raw_writeb(g, p + 1);
+		__raw_writeb(r, p + 2);
+	} else if (var->bits_per_pixel == 32) {
+		u32 __iomem *p = (u32 __iomem *)addr;
+		p += y * line_len + x;
+		__raw_writel(color, p);
+	}
+}
+
+static void fill_fb(struct fb_info *fbi)
+{
+	struct fb_var_screeninfo *var = &fbi->var;
+	const short w = var->xres_virtual;
+	const short h = var->yres_virtual;
+	void __iomem *addr = fbi->screen_base;
+	int y, x;
+
+	if (!addr)
+		return;
+
+	DBG("fill_fb %dx%d, line_len %d bytes\n", w, h, fbi->fix.line_length);
+
+	for (y = 0; y < h; y++) {
+		for (x = 0; x < w; x++) {
+			if (x < 20 && y < 20)
+				draw_pixel(fbi, x, y, 0xffffff);
+			else if (x < 20 && (y > 20 && y < h - 20))
+				draw_pixel(fbi, x, y, 0xff);
+			else if (y < 20 && (x > 20 && x < w - 20))
+				draw_pixel(fbi, x, y, 0xff00);
+			else if (x > w - 20 && (y > 20 && y < h - 20))
+				draw_pixel(fbi, x, y, 0xff0000);
+			else if (y > h - 20 && (x > 20 && x < w - 20))
+				draw_pixel(fbi, x, y, 0xffff00);
+			else if (x == 20 || x == w - 20 ||
+					y == 20 || y == h - 20)
+				draw_pixel(fbi, x, y, 0xffffff);
+			else if (x == y || w - x == h - y)
+				draw_pixel(fbi, x, y, 0xff00ff);
+			else if (w - x == y || x == h - y)
+				draw_pixel(fbi, x, y, 0x00ffff);
+			else if (x > 20 && y > 20 && x < w - 20 && y < h - 20) {
+				int t = x * 3 / w;
+				unsigned r = 0, g = 0, b = 0;
+				unsigned c;
+				if (var->bits_per_pixel == 16) {
+					if (t == 0)
+						b = (y % 32) * 256 / 32;
+					else if (t == 1)
+						g = (y % 64) * 256 / 64;
+					else if (t == 2)
+						r = (y % 32) * 256 / 32;
+				} else {
+					if (t == 0)
+						b = (y % 256);
+					else if (t == 1)
+						g = (y % 256);
+					else if (t == 2)
+						r = (y % 256);
+				}
+				c = (r << 16) | (g << 8) | (b << 0);
+				draw_pixel(fbi, x, y, c);
+			} else {
+				draw_pixel(fbi, x, y, 0);
+			}
+		}
+	}
+}
+#endif
+
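+/*
+ * VRFB provides four views of the same buffer, one per 90 degree
+ * rotation (vrfb.paddr[0..3]). Within a rotated view the image does not
+ * necessarily start at offset zero, so the view-specific x/y offsets
+ * stored in the vrfb context are converted to a byte offset here.
+ */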
+static unsigned omapfb_get_vrfb_offset(struct omapfb_info *ofbi, int rot)
+{
+	struct vrfb *vrfb = &ofbi->region.vrfb;
+	unsigned offset;
+
+	switch (rot) {
+	case FB_ROTATE_UR:
+		offset = 0;
+		break;
+	case FB_ROTATE_CW:
+		offset = vrfb->yoffset;
+		break;
+	case FB_ROTATE_UD:
+		offset = vrfb->yoffset * OMAP_VRFB_LINE_LEN + vrfb->xoffset;
+		break;
+	case FB_ROTATE_CCW:
+		offset = vrfb->xoffset * OMAP_VRFB_LINE_LEN;
+		break;
+	default:
+		BUG();
+	}
+
+	offset *= vrfb->bytespp;
+
+	return offset;
+}
+
+static u32 omapfb_get_region_rot_paddr(struct omapfb_info *ofbi, int rot)
+{
+	if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
+		return ofbi->region.vrfb.paddr[rot]
+			+ omapfb_get_vrfb_offset(ofbi, rot);
+	} else {
+		return ofbi->region.paddr;
+	}
+}
+
+static u32 omapfb_get_region_paddr(struct omapfb_info *ofbi)
+{
+	if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
+		return ofbi->region.vrfb.paddr[0];
+	else
+		return ofbi->region.paddr;
+}
+
+static void __iomem *omapfb_get_region_vaddr(struct omapfb_info *ofbi)
+{
+	if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
+		return ofbi->region.vrfb.vaddr[0];
+	else
+		return ofbi->region.vaddr;
+}
+
+static struct omapfb_colormode omapfb_colormodes[] = {
+	{
+		.dssmode = OMAP_DSS_COLOR_UYVY,
+		.bits_per_pixel = 16,
+		.nonstd = OMAPFB_COLOR_YUV422,
+	}, {
+		.dssmode = OMAP_DSS_COLOR_YUV2,
+		.bits_per_pixel = 16,
+		.nonstd = OMAPFB_COLOR_YUY422,
+	}, {
+		.dssmode = OMAP_DSS_COLOR_ARGB16,
+		.bits_per_pixel = 16,
+		.red	= { .length = 4, .offset = 8, .msb_right = 0 },
+		.green	= { .length = 4, .offset = 4, .msb_right = 0 },
+		.blue	= { .length = 4, .offset = 0, .msb_right = 0 },
+		.transp	= { .length = 4, .offset = 12, .msb_right = 0 },
+	}, {
+		.dssmode = OMAP_DSS_COLOR_RGB16,
+		.bits_per_pixel = 16,
+		.red	= { .length = 5, .offset = 11, .msb_right = 0 },
+		.green	= { .length = 6, .offset = 5, .msb_right = 0 },
+		.blue	= { .length = 5, .offset = 0, .msb_right = 0 },
+		.transp	= { .length = 0, .offset = 0, .msb_right = 0 },
+	}, {
+		.dssmode = OMAP_DSS_COLOR_RGB24P,
+		.bits_per_pixel = 24,
+		.red	= { .length = 8, .offset = 16, .msb_right = 0 },
+		.green	= { .length = 8, .offset = 8, .msb_right = 0 },
+		.blue	= { .length = 8, .offset = 0, .msb_right = 0 },
+		.transp	= { .length = 0, .offset = 0, .msb_right = 0 },
+	}, {
+		.dssmode = OMAP_DSS_COLOR_RGB24U,
+		.bits_per_pixel = 32,
+		.red	= { .length = 8, .offset = 16, .msb_right = 0 },
+		.green	= { .length = 8, .offset = 8, .msb_right = 0 },
+		.blue	= { .length = 8, .offset = 0, .msb_right = 0 },
+		.transp	= { .length = 0, .offset = 0, .msb_right = 0 },
+	}, {
+		.dssmode = OMAP_DSS_COLOR_ARGB32,
+		.bits_per_pixel = 32,
+		.red	= { .length = 8, .offset = 16, .msb_right = 0 },
+		.green	= { .length = 8, .offset = 8, .msb_right = 0 },
+		.blue	= { .length = 8, .offset = 0, .msb_right = 0 },
+		.transp	= { .length = 8, .offset = 24, .msb_right = 0 },
+	}, {
+		.dssmode = OMAP_DSS_COLOR_RGBA32,
+		.bits_per_pixel = 32,
+		.red	= { .length = 8, .offset = 24, .msb_right = 0 },
+		.green	= { .length = 8, .offset = 16, .msb_right = 0 },
+		.blue	= { .length = 8, .offset = 8, .msb_right = 0 },
+		.transp	= { .length = 8, .offset = 0, .msb_right = 0 },
+	}, {
+		.dssmode = OMAP_DSS_COLOR_RGBX32,
+		.bits_per_pixel = 32,
+		.red	= { .length = 8, .offset = 24, .msb_right = 0 },
+		.green	= { .length = 8, .offset = 16, .msb_right = 0 },
+		.blue	= { .length = 8, .offset = 8, .msb_right = 0 },
+		.transp	= { .length = 0, .offset = 0, .msb_right = 0 },
+	},
+};
+
+static bool cmp_component(struct fb_bitfield *f1, struct fb_bitfield *f2)
+{
+	return f1->length == f2->length &&
+		f1->offset == f2->offset &&
+		f1->msb_right == f2->msb_right;
+}
+
+static bool cmp_var_to_colormode(struct fb_var_screeninfo *var,
+		struct omapfb_colormode *color)
+{
+
+	if (var->bits_per_pixel == 0 ||
+			var->red.length == 0 ||
+			var->blue.length == 0 ||
+			var->green.length == 0)
+		return 0;
+
+	return var->bits_per_pixel == color->bits_per_pixel &&
+		cmp_component(&var->red, &color->red) &&
+		cmp_component(&var->green, &color->green) &&
+		cmp_component(&var->blue, &color->blue) &&
+		cmp_component(&var->transp, &color->transp);
+}
+
+static void assign_colormode_to_var(struct fb_var_screeninfo *var,
+		struct omapfb_colormode *color)
+{
+	var->bits_per_pixel = color->bits_per_pixel;
+	var->nonstd = color->nonstd;
+	var->red = color->red;
+	var->green = color->green;
+	var->blue = color->blue;
+	var->transp = color->transp;
+}
+
+static int fb_mode_to_dss_mode(struct fb_var_screeninfo *var,
+		enum omap_color_mode *mode)
+{
+	enum omap_color_mode dssmode;
+	int i;
+
+	/* first match with nonstd field */
+	if (var->nonstd) {
+		for (i = 0; i < ARRAY_SIZE(omapfb_colormodes); ++i) {
+			struct omapfb_colormode *m = &omapfb_colormodes[i];
+			if (var->nonstd == m->nonstd) {
+				assign_colormode_to_var(var, m);
+				*mode = m->dssmode;
+				return 0;
+			}
+		}
+
+		return -EINVAL;
+	}
+
+	/* then try exact match of bpp and colors */
+	for (i = 0; i < ARRAY_SIZE(omapfb_colormodes); ++i) {
+		struct omapfb_colormode *m = &omapfb_colormodes[i];
+		if (cmp_var_to_colormode(var, m)) {
+			assign_colormode_to_var(var, m);
+			*mode = m->dssmode;
+			return 0;
+		}
+	}
+
+	/* match with bpp if user has not filled color fields
+	 * properly */
+	switch (var->bits_per_pixel) {
+	case 1:
+		dssmode = OMAP_DSS_COLOR_CLUT1;
+		break;
+	case 2:
+		dssmode = OMAP_DSS_COLOR_CLUT2;
+		break;
+	case 4:
+		dssmode = OMAP_DSS_COLOR_CLUT4;
+		break;
+	case 8:
+		dssmode = OMAP_DSS_COLOR_CLUT8;
+		break;
+	case 12:
+		dssmode = OMAP_DSS_COLOR_RGB12U;
+		break;
+	case 16:
+		dssmode = OMAP_DSS_COLOR_RGB16;
+		break;
+	case 24:
+		dssmode = OMAP_DSS_COLOR_RGB24P;
+		break;
+	case 32:
+		dssmode = OMAP_DSS_COLOR_RGB24U;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(omapfb_colormodes); ++i) {
+		struct omapfb_colormode *m = &omapfb_colormodes[i];
+		if (dssmode == m->dssmode) {
+			assign_colormode_to_var(var, m);
+			*mode = m->dssmode;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int check_fb_res_bounds(struct fb_var_screeninfo *var)
+{
+	int xres_min = OMAPFB_PLANE_XRES_MIN;
+	int xres_max = 2048;
+	int yres_min = OMAPFB_PLANE_YRES_MIN;
+	int yres_max = 2048;
+
+	/* XXX: some applications seem to set virtual res to 0. */
+	if (var->xres_virtual == 0)
+		var->xres_virtual = var->xres;
+
+	if (var->yres_virtual == 0)
+		var->yres_virtual = var->yres;
+
+	if (var->xres_virtual < xres_min || var->yres_virtual < yres_min)
+		return -EINVAL;
+
+	if (var->xres < xres_min)
+		var->xres = xres_min;
+	if (var->yres < yres_min)
+		var->yres = yres_min;
+	if (var->xres > xres_max)
+		var->xres = xres_max;
+	if (var->yres > yres_max)
+		var->yres = yres_max;
+
+	if (var->xres > var->xres_virtual)
+		var->xres = var->xres_virtual;
+	if (var->yres > var->yres_virtual)
+		var->yres = var->yres_virtual;
+
+	return 0;
+}
+
+static void shrink_height(unsigned long max_frame_size,
+		struct fb_var_screeninfo *var)
+{
+	DBG("can't fit FB into memory, reducing y\n");
+	var->yres_virtual = max_frame_size /
+		(var->xres_virtual * var->bits_per_pixel >> 3);
+
+	if (var->yres_virtual < OMAPFB_PLANE_YRES_MIN)
+		var->yres_virtual = OMAPFB_PLANE_YRES_MIN;
+
+	if (var->yres > var->yres_virtual)
+		var->yres = var->yres_virtual;
+}
+
+static void shrink_width(unsigned long max_frame_size,
+		struct fb_var_screeninfo *var)
+{
+	DBG("can't fit FB into memory, reducing x\n");
+	var->xres_virtual = max_frame_size / var->yres_virtual /
+		(var->bits_per_pixel >> 3);
+
+	if (var->xres_virtual < OMAPFB_PLANE_XRES_MIN)
+		var->xres_virtual = OMAPFB_PLANE_XRES_MIN;
+
+	if (var->xres > var->xres_virtual)
+		var->xres = var->xres_virtual;
+}
+
+static int check_vrfb_fb_size(unsigned long region_size,
+		const struct fb_var_screeninfo *var)
+{
+	unsigned long min_phys_size = omap_vrfb_min_phys_size(var->xres_virtual,
+		var->yres_virtual, var->bits_per_pixel >> 3);
+
+	return min_phys_size > region_size ? -EINVAL : 0;
+}
+
+static int check_fb_size(const struct omapfb_info *ofbi,
+		struct fb_var_screeninfo *var)
+{
+	unsigned long max_frame_size = ofbi->region.size;
+	int bytespp = var->bits_per_pixel >> 3;
+	unsigned long line_size = var->xres_virtual * bytespp;
+
+	if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
+		/* One needs to check for both VRFB and OMAPFB limitations. */
+		if (check_vrfb_fb_size(max_frame_size, var))
+			shrink_height(omap_vrfb_max_height(
+				max_frame_size, var->xres_virtual, bytespp) *
+				line_size, var);
+
+		if (check_vrfb_fb_size(max_frame_size, var)) {
+			DBG("cannot fit FB to memory\n");
+			return -EINVAL;
+		}
+
+		return 0;
+	}
+
+	DBG("max frame size %lu, line size %lu\n", max_frame_size, line_size);
+
+	if (line_size * var->yres_virtual > max_frame_size)
+		shrink_height(max_frame_size, var);
+
+	if (line_size * var->yres_virtual > max_frame_size) {
+		shrink_width(max_frame_size, var);
+		line_size = var->xres_virtual * bytespp;
+	}
+
+	if (line_size * var->yres_virtual > max_frame_size) {
+		DBG("cannot fit FB to memory\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Consider if VRFB assisted rotation is in use and if the virtual space for
+ * the zero degree view needs to be mapped. The need for mapping also acts as
+ * the trigger for setting up the hardware on the context in question. This
+ * ensures that one does not attempt to access the virtual view before the
+ * hardware is serving the address translations.
+ */
+static int setup_vrfb_rotation(struct fb_info *fbi)
+{
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	struct omapfb2_mem_region *rg = &ofbi->region;
+	struct vrfb *vrfb = &rg->vrfb;
+	struct fb_var_screeninfo *var = &fbi->var;
+	struct fb_fix_screeninfo *fix = &fbi->fix;
+	unsigned bytespp;
+	bool yuv_mode;
+	enum omap_color_mode mode;
+	int r;
+	bool reconf;
+
+	if (!rg->size || ofbi->rotation_type != OMAP_DSS_ROT_VRFB)
+		return 0;
+
+	DBG("setup_vrfb_rotation\n");
+
+	r = fb_mode_to_dss_mode(var, &mode);
+	if (r)
+		return r;
+
+	bytespp = var->bits_per_pixel >> 3;
+
+	yuv_mode = mode == OMAP_DSS_COLOR_YUV2 || mode == OMAP_DSS_COLOR_UYVY;
+
+	/* We need to reconfigure VRFB if the resolution changes, if yuv mode
+	 * is enabled/disabled, or if bytes per pixel changes */
+
+	/* XXX we shouldn't allow this when framebuffer is mmapped */
+
+	reconf = false;
+
+	if (yuv_mode != vrfb->yuv_mode)
+		reconf = true;
+	else if (bytespp != vrfb->bytespp)
+		reconf = true;
+	else if (vrfb->xres != var->xres_virtual ||
+			vrfb->yres != var->yres_virtual)
+		reconf = true;
+
+	if (vrfb->vaddr[0] && reconf) {
+		fbi->screen_base = NULL;
+		fix->smem_start = 0;
+		fix->smem_len = 0;
+		iounmap(vrfb->vaddr[0]);
+		vrfb->vaddr[0] = NULL;
+		DBG("setup_vrfb_rotation: reset fb\n");
+	}
+
+	if (vrfb->vaddr[0])
+		return 0;
+
+	omap_vrfb_setup(&rg->vrfb, rg->paddr,
+			var->xres_virtual,
+			var->yres_virtual,
+			bytespp, yuv_mode);
+
+	/* Now one can ioremap the 0 angle view */
+	r = omap_vrfb_map_angle(vrfb, var->yres_virtual, 0);
+	if (r)
+		return r;
+
+	/* used by open/write in fbmem.c */
+	fbi->screen_base = ofbi->region.vrfb.vaddr[0];
+
+	fix->smem_start = ofbi->region.vrfb.paddr[0];
+
+	switch (var->nonstd) {
+	case OMAPFB_COLOR_YUV422:
+	case OMAPFB_COLOR_YUY422:
+		fix->line_length =
+			(OMAP_VRFB_LINE_LEN * var->bits_per_pixel) >> 2;
+		break;
+	default:
+		fix->line_length =
+			(OMAP_VRFB_LINE_LEN * var->bits_per_pixel) >> 3;
+		break;
+	}
+
+	fix->smem_len = var->yres_virtual * fix->line_length;
+
+	return 0;
+}
+
+int dss_mode_to_fb_mode(enum omap_color_mode dssmode,
+			struct fb_var_screeninfo *var)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(omapfb_colormodes); ++i) {
+		struct omapfb_colormode *mode = &omapfb_colormodes[i];
+		if (dssmode == mode->dssmode) {
+			assign_colormode_to_var(var, mode);
+			return 0;
+		}
+	}
+	return -ENOENT;
+}
+
+void set_fb_fix(struct fb_info *fbi)
+{
+	struct fb_fix_screeninfo *fix = &fbi->fix;
+	struct fb_var_screeninfo *var = &fbi->var;
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	struct omapfb2_mem_region *rg = &ofbi->region;
+
+	DBG("set_fb_fix\n");
+
+	/* used by open/write in fbmem.c */
+	fbi->screen_base = (char __iomem *)omapfb_get_region_vaddr(ofbi);
+
+	/* used by mmap in fbmem.c */
+	if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
+		switch (var->nonstd) {
+		case OMAPFB_COLOR_YUV422:
+		case OMAPFB_COLOR_YUY422:
+			fix->line_length =
+				(OMAP_VRFB_LINE_LEN * var->bits_per_pixel) >> 2;
+			break;
+		default:
+			fix->line_length =
+				(OMAP_VRFB_LINE_LEN * var->bits_per_pixel) >> 3;
+			break;
+		}
+
+		fix->smem_len = var->yres_virtual * fix->line_length;
+	} else {
+		fix->line_length =
+			(var->xres_virtual * var->bits_per_pixel) >> 3;
+		fix->smem_len = rg->size;
+	}
+
+	fix->smem_start = omapfb_get_region_paddr(ofbi);
+
+	fix->type = FB_TYPE_PACKED_PIXELS;
+
+	if (var->nonstd)
+		fix->visual = FB_VISUAL_PSEUDOCOLOR;
+	else {
+		switch (var->bits_per_pixel) {
+		case 32:
+		case 24:
+		case 16:
+		case 12:
+			fix->visual = FB_VISUAL_TRUECOLOR;
+			/* 12bpp is stored in 16 bits */
+			break;
+		case 1:
+		case 2:
+		case 4:
+		case 8:
+			fix->visual = FB_VISUAL_PSEUDOCOLOR;
+			break;
+		}
+	}
+
+	fix->accel = FB_ACCEL_NONE;
+
+	fix->xpanstep = 1;
+	fix->ypanstep = 1;
+}
+
+/* check new var and possibly modify it to be ok */
+int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var)
+{
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	struct omap_dss_device *display = fb2display(fbi);
+	enum omap_color_mode mode = 0;
+	int i;
+	int r;
+
+	DBG("check_fb_var %d\n", ofbi->id);
+
+	if (ofbi->region.size == 0)
+		return 0;
+
+	r = fb_mode_to_dss_mode(var, &mode);
+	if (r) {
+		DBG("cannot convert var to omap dss mode\n");
+		return r;
+	}
+
+	for (i = 0; i < ofbi->num_overlays; ++i) {
+		if ((ofbi->overlays[i]->supported_modes & mode) == 0) {
+			DBG("invalid mode\n");
+			return -EINVAL;
+		}
+	}
+
+	if (var->rotate < 0 || var->rotate > 3)
+		return -EINVAL;
+
+	if (check_fb_res_bounds(var))
+		return -EINVAL;
+
+	if (check_fb_size(ofbi, var))
+		return -EINVAL;
+
+	if (var->xres + var->xoffset > var->xres_virtual)
+		var->xoffset = var->xres_virtual - var->xres;
+	if (var->yres + var->yoffset > var->yres_virtual)
+		var->yoffset = var->yres_virtual - var->yres;
+
+	DBG("xres = %d, yres = %d, vxres = %d, vyres = %d\n",
+			var->xres, var->yres,
+			var->xres_virtual, var->yres_virtual);
+
+	var->height             = -1;
+	var->width              = -1;
+	var->grayscale          = 0;
+
+	if (display && display->get_timings) {
+		struct omap_video_timings timings;
+		display->get_timings(display, &timings);
+
+		/* pixclock in ps, the rest in pixels */
+		var->pixclock = timings.pixel_clock != 0 ?
+			KHZ2PICOS(timings.pixel_clock) :
+			0;
+		var->left_margin = timings.hfp;
+		var->right_margin = timings.hbp;
+		var->upper_margin = timings.vfp;
+		var->lower_margin = timings.vbp;
+		var->hsync_len = timings.hsw;
+		var->vsync_len = timings.vsw;
+	} else {
+		var->pixclock = 0;
+		var->left_margin = 0;
+		var->right_margin = 0;
+		var->upper_margin = 0;
+		var->lower_margin = 0;
+		var->hsync_len = 0;
+		var->vsync_len = 0;
+	}
+
+	/* TODO: get these from panel->config */
+	var->vmode              = FB_VMODE_NONINTERLACED;
+	var->sync               = 0;
+
+	return 0;
+}
+
+/*
+ * ---------------------------------------------------------------------------
+ * fbdev framework callbacks
+ * ---------------------------------------------------------------------------
+ */
+static int omapfb_open(struct fb_info *fbi, int user)
+{
+	return 0;
+}
+
+static int omapfb_release(struct fb_info *fbi, int user)
+{
+#if 0
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	struct omapfb2_device *fbdev = ofbi->fbdev;
+	struct omap_dss_device *display = fb2display(fbi);
+
+	DBG("Closing fb with plane index %d\n", ofbi->id);
+
+	omapfb_lock(fbdev);
+
+	if (display && display->get_update_mode && display->update) {
+		/* XXX this update should be removed, I think. But it's
+		 * good for debugging */
+		if (display->get_update_mode(display) ==
+				OMAP_DSS_UPDATE_MANUAL) {
+			u16 w, h;
+
+			if (display->sync)
+				display->sync(display);
+
+			display->get_resolution(display, &w, &h);
+			display->update(display, 0, 0, w, h);
+		}
+	}
+
+	if (display && display->sync)
+		display->sync(display);
+
+	omapfb_unlock(fbdev);
+#endif
+	return 0;
+}
+
+static unsigned calc_rotation_offset_dma(struct fb_var_screeninfo *var,
+		struct fb_fix_screeninfo *fix, int rotation)
+{
+	unsigned offset;
+
+	offset = var->yoffset * fix->line_length +
+		var->xoffset * (var->bits_per_pixel >> 3);
+
+	return offset;
+}
+
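+/*
+ * For VRFB the panning offset is computed in the rotated coordinate
+ * system: each rotated view has its own origin, so the x/y offsets are
+ * added or subtracted along the rotated axes depending on the rotation.
+ */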
+static unsigned calc_rotation_offset_vrfb(struct fb_var_screeninfo *var,
+		struct fb_fix_screeninfo *fix, int rotation)
+{
+	unsigned offset;
+
+	if (rotation == FB_ROTATE_UD)
+		offset = (var->yres_virtual - var->yres) *
+			fix->line_length;
+	else if (rotation == FB_ROTATE_CW)
+		offset = (var->yres_virtual - var->yres) *
+			(var->bits_per_pixel >> 3);
+	else
+		offset = 0;
+
+	if (rotation == FB_ROTATE_UR)
+		offset += var->yoffset * fix->line_length +
+			var->xoffset * (var->bits_per_pixel >> 3);
+	else if (rotation == FB_ROTATE_UD)
+		offset -= var->yoffset * fix->line_length +
+			var->xoffset * (var->bits_per_pixel >> 3);
+	else if (rotation == FB_ROTATE_CW)
+		offset -= var->xoffset * fix->line_length +
+			var->yoffset * (var->bits_per_pixel >> 3);
+	else if (rotation == FB_ROTATE_CCW)
+		offset += var->xoffset * fix->line_length +
+			var->yoffset * (var->bits_per_pixel >> 3);
+
+	return offset;
+}
+
+/* setup overlay according to the fb */
+static int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl,
+		u16 posx, u16 posy, u16 outw, u16 outh)
+{
+	int r = 0;
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	struct fb_var_screeninfo *var = &fbi->var;
+	struct fb_fix_screeninfo *fix = &fbi->fix;
+	enum omap_color_mode mode = 0;
+	int offset;
+	u32 data_start_p;
+	void __iomem *data_start_v;
+	struct omap_overlay_info info;
+	int xres, yres;
+	int screen_width;
+	int mirror;
+	int rotation = var->rotate;
+	int i;
+
+	for (i = 0; i < ofbi->num_overlays; i++) {
+		if (ovl != ofbi->overlays[i])
+			continue;
+
+		rotation = (rotation + ofbi->rotation[i]) % 4;
+		break;
+	}
+
+	DBG("setup_overlay %d, posx %d, posy %d, outw %d, outh %d\n", ofbi->id,
+			posx, posy, outw, outh);
+
+	if (rotation == FB_ROTATE_CW || rotation == FB_ROTATE_CCW) {
+		xres = var->yres;
+		yres = var->xres;
+	} else {
+		xres = var->xres;
+		yres = var->yres;
+	}
+
+	if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
+		data_start_p = omapfb_get_region_rot_paddr(ofbi, rotation);
+		data_start_v = NULL;
+	} else {
+		data_start_p = omapfb_get_region_paddr(ofbi);
+		data_start_v = omapfb_get_region_vaddr(ofbi);
+	}
+
+	if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
+		offset = calc_rotation_offset_vrfb(var, fix, rotation);
+	else
+		offset = calc_rotation_offset_dma(var, fix, rotation);
+
+	data_start_p += offset;
+	data_start_v += offset;
+
+	if (offset)
+		DBG("offset %d, %d = %d\n",
+				var->xoffset, var->yoffset, offset);
+
+	DBG("paddr %x, vaddr %p\n", data_start_p, data_start_v);
+
+	r = fb_mode_to_dss_mode(var, &mode);
+	if (r) {
+		DBG("fb_mode_to_dss_mode failed");
+		goto err;
+	}
+
+	switch (var->nonstd) {
+	case OMAPFB_COLOR_YUV422:
+	case OMAPFB_COLOR_YUY422:
+		if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
+			screen_width = fix->line_length
+				/ (var->bits_per_pixel >> 2);
+			break;
+		}
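+		/* fall through for the non-VRFB case */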
+	default:
+		screen_width = fix->line_length / (var->bits_per_pixel >> 3);
+		break;
+	}
+
+	ovl->get_overlay_info(ovl, &info);
+
+	if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
+		mirror = 0;
+	else
+		mirror = ofbi->mirror;
+
+	info.paddr = data_start_p;
+	info.vaddr = data_start_v;
+	info.screen_width = screen_width;
+	info.width = xres;
+	info.height = yres;
+	info.color_mode = mode;
+	info.rotation_type = ofbi->rotation_type;
+	info.rotation = rotation;
+	info.mirror = mirror;
+
+	info.pos_x = posx;
+	info.pos_y = posy;
+	info.out_width = outw;
+	info.out_height = outh;
+
+	r = ovl->set_overlay_info(ovl, &info);
+	if (r) {
+		DBG("ovl->setup_overlay_info failed\n");
+		goto err;
+	}
+
+	return 0;
+
+err:
+	DBG("setup_overlay failed\n");
+	return r;
+}
+
+/* apply var to the overlay */
+int omapfb_apply_changes(struct fb_info *fbi, int init)
+{
+	int r = 0;
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	struct fb_var_screeninfo *var = &fbi->var;
+	struct omap_overlay *ovl;
+	u16 posx, posy;
+	u16 outw, outh;
+	int i;
+
+#ifdef DEBUG
+	if (omapfb_test_pattern)
+		fill_fb(fbi);
+#endif
+
+	for (i = 0; i < ofbi->num_overlays; i++) {
+		ovl = ofbi->overlays[i];
+
+		DBG("apply_changes, fb %d, ovl %d\n", ofbi->id, ovl->id);
+
+		if (ofbi->region.size == 0) {
+			/* the fb is not available. disable the overlay */
+			omapfb_overlay_enable(ovl, 0);
+			if (!init && ovl->manager)
+				ovl->manager->apply(ovl->manager);
+			continue;
+		}
+
+		if (init || (ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
+			int rotation = (var->rotate + ofbi->rotation[i]) % 4;
+			if (rotation == FB_ROTATE_CW ||
+					rotation == FB_ROTATE_CCW) {
+				outw = var->yres;
+				outh = var->xres;
+			} else {
+				outw = var->xres;
+				outh = var->yres;
+			}
+		} else {
+			outw = ovl->info.out_width;
+			outh = ovl->info.out_height;
+		}
+
+		if (init) {
+			posx = 0;
+			posy = 0;
+		} else {
+			posx = ovl->info.pos_x;
+			posy = ovl->info.pos_y;
+		}
+
+		r = omapfb_setup_overlay(fbi, ovl, posx, posy, outw, outh);
+		if (r)
+			goto err;
+
+		if (!init && ovl->manager)
+			ovl->manager->apply(ovl->manager);
+	}
+	return 0;
+err:
+	DBG("apply_changes failed\n");
+	return r;
+}
+
+/* checks var and, if necessary, tweaks it to something supported,
+ * DO NOT MODIFY PAR */
+static int omapfb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi)
+{
+	int r;
+
+	DBG("check_var(%d)\n", FB2OFB(fbi)->id);
+
+	r = check_fb_var(fbi, var);
+
+	return r;
+}
+
+/* set the video mode according to info->var */
+static int omapfb_set_par(struct fb_info *fbi)
+{
+	int r;
+
+	DBG("set_par(%d)\n", FB2OFB(fbi)->id);
+
+	set_fb_fix(fbi);
+
+	r = setup_vrfb_rotation(fbi);
+	if (r)
+		return r;
+
+	r = omapfb_apply_changes(fbi, 0);
+
+	return r;
+}
+
+static int omapfb_pan_display(struct fb_var_screeninfo *var,
+		struct fb_info *fbi)
+{
+	struct fb_var_screeninfo new_var;
+	int r;
+
+	DBG("pan_display(%d)\n", FB2OFB(fbi)->id);
+
+	if (var->xoffset == fbi->var.xoffset &&
+	    var->yoffset == fbi->var.yoffset)
+		return 0;
+
+	new_var = fbi->var;
+	new_var.xoffset = var->xoffset;
+	new_var.yoffset = var->yoffset;
+
+	fbi->var = new_var;
+
+	r = omapfb_apply_changes(fbi, 0);
+
+	return r;
+}
+
+static void mmap_user_open(struct vm_area_struct *vma)
+{
+	struct omapfb_info *ofbi = (struct omapfb_info *)vma->vm_private_data;
+
+	atomic_inc(&ofbi->map_count);
+}
+
+static void mmap_user_close(struct vm_area_struct *vma)
+{
+	struct omapfb_info *ofbi = (struct omapfb_info *)vma->vm_private_data;
+
+	atomic_dec(&ofbi->map_count);
+}
+
+static struct vm_operations_struct mmap_user_ops = {
+	.open = mmap_user_open,
+	.close = mmap_user_close,
+};
+
+static int omapfb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
+{
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	struct fb_fix_screeninfo *fix = &fbi->fix;
+	unsigned long off;
+	unsigned long start;
+	u32 len;
+
+	if (vma->vm_end - vma->vm_start == 0)
+		return 0;
+	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+		return -EINVAL;
+	off = vma->vm_pgoff << PAGE_SHIFT;
+
+	start = omapfb_get_region_paddr(ofbi);
+	len = fix->smem_len;
+	if (off >= len)
+		return -EINVAL;
+	if ((vma->vm_end - vma->vm_start + off) > len)
+		return -EINVAL;
+
+	off += start;
+
+	DBG("user mmap region start %lx, len %d, off %lx\n", start, len, off);
+
+	vma->vm_pgoff = off >> PAGE_SHIFT;
+	vma->vm_flags |= VM_IO | VM_RESERVED;
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	vma->vm_ops = &mmap_user_ops;
+	vma->vm_private_data = ofbi;
+	if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
+			     vma->vm_end - vma->vm_start, vma->vm_page_prot))
+		return -EAGAIN;
+	/* vm_ops.open won't be called for mmap itself. */
+	atomic_inc(&ofbi->map_count);
+	return 0;
+}
+
+/* Store a single color palette entry into a pseudo palette or the hardware
+ * palette if one is available. For now we support only 16bpp and thus store
+ * the entry only to the pseudo palette.
+ */
+static int _setcolreg(struct fb_info *fbi, u_int regno, u_int red, u_int green,
+		u_int blue, u_int transp, int update_hw_pal)
+{
+	/*struct omapfb_info *ofbi = FB2OFB(fbi);*/
+	/*struct omapfb2_device *fbdev = ofbi->fbdev;*/
+	struct fb_var_screeninfo *var = &fbi->var;
+	int r = 0;
+
+	enum omapfb_color_format mode = OMAPFB_COLOR_RGB24U; /* XXX */
+
+	/*switch (plane->color_mode) {*/
+	switch (mode) {
+	case OMAPFB_COLOR_YUV422:
+	case OMAPFB_COLOR_YUV420:
+	case OMAPFB_COLOR_YUY422:
+		r = -EINVAL;
+		break;
+	case OMAPFB_COLOR_CLUT_8BPP:
+	case OMAPFB_COLOR_CLUT_4BPP:
+	case OMAPFB_COLOR_CLUT_2BPP:
+	case OMAPFB_COLOR_CLUT_1BPP:
+		/*
+		   if (fbdev->ctrl->setcolreg)
+		   r = fbdev->ctrl->setcolreg(regno, red, green, blue,
+		   transp, update_hw_pal);
+		   */
+		/* CLUT modes are not supported for now */
+		r = -EINVAL;
+		break;
+	case OMAPFB_COLOR_RGB565:
+	case OMAPFB_COLOR_RGB444:
+	case OMAPFB_COLOR_RGB24P:
+	case OMAPFB_COLOR_RGB24U:
+		if (r != 0)
+			break;
+
+		if (regno < 0) {
+			r = -EINVAL;
+			break;
+		}
+
+		if (regno < 16) {
+			u16 pal;
+			pal = ((red >> (16 - var->red.length)) <<
+					var->red.offset) |
+				((green >> (16 - var->green.length)) <<
+				 var->green.offset) |
+				(blue >> (16 - var->blue.length));
+			((u32 *)(fbi->pseudo_palette))[regno] = pal;
+		}
+		break;
+	default:
+		BUG();
+	}
+	return r;
+}
+
+static int omapfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+		u_int transp, struct fb_info *info)
+{
+	DBG("setcolreg\n");
+
+	return _setcolreg(info, regno, red, green, blue, transp, 1);
+}
+
+static int omapfb_setcmap(struct fb_cmap *cmap, struct fb_info *info)
+{
+	int count, index, r;
+	u16 *red, *green, *blue, *transp;
+	u16 trans = 0xffff;
+
+	DBG("setcmap\n");
+
+	red     = cmap->red;
+	green   = cmap->green;
+	blue    = cmap->blue;
+	transp  = cmap->transp;
+	index   = cmap->start;
+
+	for (count = 0; count < cmap->len; count++) {
+		if (transp)
+			trans = *transp++;
+		r = _setcolreg(info, index++, *red++, *green++, *blue++, trans,
+				count == cmap->len - 1);
+		if (r != 0)
+			return r;
+	}
+
+	return 0;
+}
+
+static int omapfb_blank(int blank, struct fb_info *fbi)
+{
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	struct omapfb2_device *fbdev = ofbi->fbdev;
+	struct omap_dss_device *display = fb2display(fbi);
+	int do_update = 0;
+	int r = 0;
+
+	omapfb_lock(fbdev);
+
+	if (!display) {
+		r = -EINVAL;
+		goto exit;
+	}
+
+	switch (blank) {
+	case FB_BLANK_UNBLANK:
+		if (display->state != OMAP_DSS_DISPLAY_SUSPENDED)
+			goto exit;
+
+		if (display->resume)
+			r = display->resume(display);
+
+		if (r == 0 && display->get_update_mode &&
+				display->get_update_mode(display) ==
+				OMAP_DSS_UPDATE_MANUAL)
+			do_update = 1;
+
+		break;
+
+	case FB_BLANK_NORMAL:
+		/* FB_BLANK_NORMAL could be implemented.
+		 * Needs DSS additions. */
+	case FB_BLANK_VSYNC_SUSPEND:
+	case FB_BLANK_HSYNC_SUSPEND:
+	case FB_BLANK_POWERDOWN:
+		if (display->state != OMAP_DSS_DISPLAY_ACTIVE)
+			goto exit;
+
+		if (display->suspend)
+			r = display->suspend(display);
+
+		break;
+
+	default:
+		r = -EINVAL;
+	}
+
+exit:
+	omapfb_unlock(fbdev);
+
+	if (r == 0 && do_update && display->update) {
+		u16 w, h;
+		display->get_resolution(display, &w, &h);
+
+		r = display->update(display, 0, 0, w, h);
+	}
+
+	return r;
+}
+
+#if 0
+/* XXX fb_read and fb_write are needed for VRFB */
+ssize_t omapfb_write(struct fb_info *info, const char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	DBG("omapfb_write %d, %lu\n", count, (unsigned long)*ppos);
+	/* XXX needed for VRFB */
+	return count;
+}
+#endif
+
+static struct fb_ops omapfb_ops = {
+	.owner          = THIS_MODULE,
+	.fb_open        = omapfb_open,
+	.fb_release     = omapfb_release,
+	.fb_fillrect    = cfb_fillrect,
+	.fb_copyarea    = cfb_copyarea,
+	.fb_imageblit   = cfb_imageblit,
+	.fb_blank       = omapfb_blank,
+	.fb_ioctl       = omapfb_ioctl,
+	.fb_check_var   = omapfb_check_var,
+	.fb_set_par     = omapfb_set_par,
+	.fb_pan_display = omapfb_pan_display,
+	.fb_mmap	= omapfb_mmap,
+	.fb_setcolreg	= omapfb_setcolreg,
+	.fb_setcmap	= omapfb_setcmap,
+	/*.fb_write	= omapfb_write,*/
+};
+
+static void omapfb_free_fbmem(struct fb_info *fbi)
+{
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	struct omapfb2_device *fbdev = ofbi->fbdev;
+	struct omapfb2_mem_region *rg;
+
+	rg = &ofbi->region;
+
+	if (rg->paddr)
+		if (omap_vram_free(rg->paddr, rg->size))
+			dev_err(fbdev->dev, "VRAM FREE failed\n");
+
+	if (rg->vaddr)
+		iounmap(rg->vaddr);
+
+	if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
+		/* unmap the 0 angle rotation */
+		if (rg->vrfb.vaddr[0]) {
+			iounmap(rg->vrfb.vaddr[0]);
+			omap_vrfb_release_ctx(&rg->vrfb);
+		}
+	}
+
+	rg->vaddr = NULL;
+	rg->paddr = 0;
+	rg->alloc = 0;
+	rg->size = 0;
+}
+
+static void clear_fb_info(struct fb_info *fbi)
+{
+	memset(&fbi->var, 0, sizeof(fbi->var));
+	memset(&fbi->fix, 0, sizeof(fbi->fix));
+	strlcpy(fbi->fix.id, MODULE_NAME, sizeof(fbi->fix.id));
+}
+
+static int omapfb_free_all_fbmem(struct omapfb2_device *fbdev)
+{
+	int i;
+
+	DBG("free all fbmem\n");
+
+	for (i = 0; i < fbdev->num_fbs; i++) {
+		struct fb_info *fbi = fbdev->fbs[i];
+		omapfb_free_fbmem(fbi);
+		clear_fb_info(fbi);
+	}
+
+	return 0;
+}
+
+static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size,
+		unsigned long paddr)
+{
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	struct omapfb2_device *fbdev = ofbi->fbdev;
+	struct omapfb2_mem_region *rg;
+	void __iomem *vaddr;
+	int r;
+
+	rg = &ofbi->region;
+	memset(rg, 0, sizeof(*rg));
+
+	size = PAGE_ALIGN(size);
+
+	if (!paddr) {
+		DBG("allocating %lu bytes for fb %d\n", size, ofbi->id);
+		r = omap_vram_alloc(OMAP_VRAM_MEMTYPE_SDRAM, size, &paddr);
+	} else {
+		DBG("reserving %lu bytes at %lx for fb %d\n", size, paddr,
+				ofbi->id);
+		r = omap_vram_reserve(paddr, size);
+	}
+
+	if (r) {
+		dev_err(fbdev->dev, "failed to allocate framebuffer\n");
+		return -ENOMEM;
+	}
+
+	if (ofbi->rotation_type != OMAP_DSS_ROT_VRFB) {
+		vaddr = ioremap_wc(paddr, size);
+
+		if (!vaddr) {
+			dev_err(fbdev->dev, "failed to ioremap framebuffer\n");
+			omap_vram_free(paddr, size);
+			return -ENOMEM;
+		}
+
+		DBG("allocated VRAM paddr %lx, vaddr %p\n", paddr, vaddr);
+	} else {
+		r = omap_vrfb_request_ctx(&rg->vrfb);
+		if (r) {
+			dev_err(fbdev->dev, "vrfb create ctx failed\n");
+			return r;
+		}
+
+		vaddr = NULL;
+	}
+
+	rg->paddr = paddr;
+	rg->vaddr = vaddr;
+	rg->size = size;
+	rg->alloc = 1;
+
+	return 0;
+}
+
+/* allocate fbmem using display resolution as reference */
+static int omapfb_alloc_fbmem_display(struct fb_info *fbi, unsigned long size,
+		unsigned long paddr)
+{
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	struct omap_dss_device *display;
+	int bytespp;
+
+	display = fb2display(fbi);
+
+	if (!display)
+		return 0;
+
+	switch (display->get_recommended_bpp(display)) {
+	case 16:
+		bytespp = 2;
+		break;
+	case 24:
+		bytespp = 4;
+		break;
+	default:
+		bytespp = 4;
+		break;
+	}
+
+	if (!size) {
+		u16 w, h;
+
+		display->get_resolution(display, &w, &h);
+
+		if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
+			size = max(omap_vrfb_min_phys_size(w, h, bytespp),
+					omap_vrfb_min_phys_size(h, w, bytespp));
+
+			DBG("adjusting fb mem size for VRFB, %u -> %lu\n",
+					w * h * bytespp, size);
+		} else {
+			size = w * h * bytespp;
+		}
+	}
+
+	if (!size)
+		return 0;
+
+	return omapfb_alloc_fbmem(fbi, size, paddr);
+}
+
+static enum omap_color_mode fb_format_to_dss_mode(enum omapfb_color_format fmt)
+{
+	enum omap_color_mode mode;
+
+	switch (fmt) {
+	case OMAPFB_COLOR_RGB565:
+		mode = OMAP_DSS_COLOR_RGB16;
+		break;
+	case OMAPFB_COLOR_YUV422:
+		mode = OMAP_DSS_COLOR_YUV2;
+		break;
+	case OMAPFB_COLOR_CLUT_8BPP:
+		mode = OMAP_DSS_COLOR_CLUT8;
+		break;
+	case OMAPFB_COLOR_CLUT_4BPP:
+		mode = OMAP_DSS_COLOR_CLUT4;
+		break;
+	case OMAPFB_COLOR_CLUT_2BPP:
+		mode = OMAP_DSS_COLOR_CLUT2;
+		break;
+	case OMAPFB_COLOR_CLUT_1BPP:
+		mode = OMAP_DSS_COLOR_CLUT1;
+		break;
+	case OMAPFB_COLOR_RGB444:
+		mode = OMAP_DSS_COLOR_RGB12U;
+		break;
+	case OMAPFB_COLOR_YUY422:
+		mode = OMAP_DSS_COLOR_UYVY;
+		break;
+	case OMAPFB_COLOR_ARGB16:
+		mode = OMAP_DSS_COLOR_ARGB16;
+		break;
+	case OMAPFB_COLOR_RGB24U:
+		mode = OMAP_DSS_COLOR_RGB24U;
+		break;
+	case OMAPFB_COLOR_RGB24P:
+		mode = OMAP_DSS_COLOR_RGB24P;
+		break;
+	case OMAPFB_COLOR_ARGB32:
+		mode = OMAP_DSS_COLOR_ARGB32;
+		break;
+	case OMAPFB_COLOR_RGBA32:
+		mode = OMAP_DSS_COLOR_RGBA32;
+		break;
+	case OMAPFB_COLOR_RGBX32:
+		mode = OMAP_DSS_COLOR_RGBX32;
+		break;
+	default:
+		mode = -EINVAL;
+	}
+
+	return mode;
+}
+
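+/*
+ * Parse the 'vram' option. As implemented below, the format is a comma
+ * separated list of <fbnum>:<size>[@<physaddr>] entries, for example
+ * "0:2M,1:2M@0x81000000" (the example values are only illustrative).
+ */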
+static int omapfb_parse_vram_param(const char *param, int max_entries,
+		unsigned long *sizes, unsigned long *paddrs)
+{
+	int fbnum;
+	unsigned long size;
+	unsigned long paddr = 0;
+	char *p, *start;
+
+	start = (char *)param;
+
+	while (1) {
+		p = start;
+
+		fbnum = simple_strtoul(p, &p, 10);
+
+		if (p == param)
+			return -EINVAL;
+
+		if (*p != ':')
+			return -EINVAL;
+
+		if (fbnum >= max_entries)
+			return -EINVAL;
+
+		size = memparse(p + 1, &p);
+
+		if (!size)
+			return -EINVAL;
+
+		paddr = 0;
+
+		if (*p == '@') {
+			paddr = simple_strtoul(p + 1, &p, 16);
+
+			if (!paddr)
+				return -EINVAL;
+
+		}
+
+		paddrs[fbnum] = paddr;
+		sizes[fbnum] = size;
+
+		if (*p == 0)
+			break;
+
+		if (*p != ',')
+			return -EINVAL;
+
+		++p;
+
+		start = p;
+	}
+
+	return 0;
+}
+
+static int omapfb_allocate_all_fbs(struct omapfb2_device *fbdev)
+{
+	int i, r;
+	unsigned long vram_sizes[10];
+	unsigned long vram_paddrs[10];
+
+	memset(&vram_sizes, 0, sizeof(vram_sizes));
+	memset(&vram_paddrs, 0, sizeof(vram_paddrs));
+
+	if (def_vram &&	omapfb_parse_vram_param(def_vram, 10,
+				vram_sizes, vram_paddrs)) {
+		dev_err(fbdev->dev, "failed to parse vram parameter\n");
+
+		memset(&vram_sizes, 0, sizeof(vram_sizes));
+		memset(&vram_paddrs, 0, sizeof(vram_paddrs));
+	}
+
+	if (fbdev->dev->platform_data) {
+		struct omapfb_platform_data *opd;
+		opd = fbdev->dev->platform_data;
+		for (i = 0; i < opd->mem_desc.region_cnt; ++i) {
+			if (!vram_sizes[i]) {
+				unsigned long size;
+				unsigned long paddr;
+
+				size = opd->mem_desc.region[i].size;
+				paddr = opd->mem_desc.region[i].paddr;
+
+				vram_sizes[i] = size;
+				vram_paddrs[i] = paddr;
+			}
+		}
+	}
+
+	for (i = 0; i < fbdev->num_fbs; i++) {
+		/* allocate memory automatically only for fb0, or if
+		 * explicitly defined with the vram or platform data option */
+		if (i == 0 || vram_sizes[i] != 0) {
+			r = omapfb_alloc_fbmem_display(fbdev->fbs[i],
+					vram_sizes[i], vram_paddrs[i]);
+
+			if (r)
+				return r;
+		}
+	}
+
+	for (i = 0; i < fbdev->num_fbs; i++) {
+		struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]);
+		struct omapfb2_mem_region *rg;
+		rg = &ofbi->region;
+
+		DBG("region%d phys %08lx virt %p size=%lu\n",
+				i,
+				rg->paddr,
+				rg->vaddr,
+				rg->size);
+	}
+
+	return 0;
+}
+
+int omapfb_realloc_fbmem(struct fb_info *fbi, unsigned long size, int type)
+{
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	struct omapfb2_device *fbdev = ofbi->fbdev;
+	struct omap_dss_device *display = fb2display(fbi);
+	struct omapfb2_mem_region *rg = &ofbi->region;
+	unsigned long old_size = rg->size;
+	unsigned long old_paddr = rg->paddr;
+	int old_type = rg->type;
+	int r;
+
+	if (type > OMAPFB_MEMTYPE_MAX)
+		return -EINVAL;
+
+	size = PAGE_ALIGN(size);
+
+	if (old_size == size && old_type == type)
+		return 0;
+
+	if (display && display->sync)
+		display->sync(display);
+
+	omapfb_free_fbmem(fbi);
+
+	if (size == 0) {
+		clear_fb_info(fbi);
+		return 0;
+	}
+
+	r = omapfb_alloc_fbmem(fbi, size, 0);
+
+	if (r) {
+		if (old_size)
+			omapfb_alloc_fbmem(fbi, old_size, old_paddr);
+
+		if (rg->size == 0)
+			clear_fb_info(fbi);
+
+		return r;
+	}
+
+	if (old_size == size)
+		return 0;
+
+	if (old_size == 0) {
+		DBG("initializing fb %d\n", ofbi->id);
+		r = omapfb_fb_init(fbdev, fbi);
+		if (r) {
+			DBG("omapfb_fb_init failed\n");
+			goto err;
+		}
+		r = omapfb_apply_changes(fbi, 1);
+		if (r) {
+			DBG("omapfb_apply_changes failed\n");
+			goto err;
+		}
+	} else {
+		struct fb_var_screeninfo new_var;
+		memcpy(&new_var, &fbi->var, sizeof(new_var));
+		r = check_fb_var(fbi, &new_var);
+		if (r)
+			goto err;
+		memcpy(&fbi->var, &new_var, sizeof(fbi->var));
+		set_fb_fix(fbi);
+		r = setup_vrfb_rotation(fbi);
+		if (r)
+			goto err;
+	}
+
+	return 0;
+err:
+	omapfb_free_fbmem(fbi);
+	clear_fb_info(fbi);
+	return r;
+}
+
+/* initialize fb_info, var, fix to something sane based on the display */
+static int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi)
+{
+	struct fb_var_screeninfo *var = &fbi->var;
+	struct omap_dss_device *display = fb2display(fbi);
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	int r = 0;
+
+	fbi->fbops = &omapfb_ops;
+	fbi->flags = FBINFO_FLAG_DEFAULT;
+	fbi->pseudo_palette = fbdev->pseudo_palette;
+
+	if (ofbi->region.size == 0) {
+		clear_fb_info(fbi);
+		return 0;
+	}
+
+	var->nonstd = 0;
+	var->bits_per_pixel = 0;
+
+	var->rotate = def_rotate;
+
+	/*
+	 * Check if there is a default color format set in the board file,
+	 * and use this format instead of the default deduced from the
+	 * display bpp.
+	 */
+	if (fbdev->dev->platform_data) {
+		struct omapfb_platform_data *opd;
+		int id = ofbi->id;
+
+		opd = fbdev->dev->platform_data;
+		if (opd->mem_desc.region[id].format_used) {
+			enum omap_color_mode mode;
+			enum omapfb_color_format format;
+
+			format = opd->mem_desc.region[id].format;
+			mode = fb_format_to_dss_mode(format);
+			if (mode < 0) {
+				r = mode;
+				goto err;
+			}
+			r = dss_mode_to_fb_mode(mode, var);
+			if (r < 0)
+				goto err;
+		}
+	}
+
+	if (display) {
+		u16 w, h;
+		int rotation = (var->rotate + ofbi->rotation[0]) % 4;
+
+		display->get_resolution(display, &w, &h);
+
+		if (rotation == FB_ROTATE_CW ||
+				rotation == FB_ROTATE_CCW) {
+			var->xres = h;
+			var->yres = w;
+		} else {
+			var->xres = w;
+			var->yres = h;
+		}
+
+		var->xres_virtual = var->xres;
+		var->yres_virtual = var->yres;
+
+		if (!var->bits_per_pixel) {
+			switch (display->get_recommended_bpp(display)) {
+			case 16:
+				var->bits_per_pixel = 16;
+				break;
+			case 24:
+				var->bits_per_pixel = 32;
+				break;
+			default:
+				dev_err(fbdev->dev, "illegal display "
+						"bpp\n");
+				return -EINVAL;
+			}
+		}
+	} else {
+		/* if there's no display, let's just guess some basic values */
+		var->xres = 320;
+		var->yres = 240;
+		var->xres_virtual = var->xres;
+		var->yres_virtual = var->yres;
+		if (!var->bits_per_pixel)
+			var->bits_per_pixel = 16;
+	}
+
+	r = check_fb_var(fbi, var);
+	if (r)
+		goto err;
+
+	set_fb_fix(fbi);
+	r = setup_vrfb_rotation(fbi);
+	if (r)
+		goto err;
+
+	r = fb_alloc_cmap(&fbi->cmap, 256, 0);
+	if (r)
+		dev_err(fbdev->dev, "unable to allocate color map memory\n");
+
+err:
+	return r;
+}
+
+static void fbinfo_cleanup(struct omapfb2_device *fbdev, struct fb_info *fbi)
+{
+	fb_dealloc_cmap(&fbi->cmap);
+}
+
+
+static void omapfb_free_resources(struct omapfb2_device *fbdev)
+{
+	int i;
+
+	DBG("free_resources\n");
+
+	if (fbdev == NULL)
+		return;
+
+	for (i = 0; i < fbdev->num_fbs; i++)
+		unregister_framebuffer(fbdev->fbs[i]);
+
+	/* free the reserved fbmem */
+	omapfb_free_all_fbmem(fbdev);
+
+	for (i = 0; i < fbdev->num_fbs; i++) {
+		fbinfo_cleanup(fbdev, fbdev->fbs[i]);
+		framebuffer_release(fbdev->fbs[i]);
+	}
+
+	for (i = 0; i < fbdev->num_displays; i++) {
+		if (fbdev->displays[i]->state != OMAP_DSS_DISPLAY_DISABLED)
+			fbdev->displays[i]->disable(fbdev->displays[i]);
+
+		omap_dss_put_device(fbdev->displays[i]);
+	}
+
+	dev_set_drvdata(fbdev->dev, NULL);
+	kfree(fbdev);
+}
+
+static int omapfb_create_framebuffers(struct omapfb2_device *fbdev)
+{
+	int r, i;
+
+	fbdev->num_fbs = 0;
+
+	DBG("create %d framebuffers\n",	CONFIG_FB_OMAP2_NUM_FBS);
+
+	/* allocate fb_infos */
+	for (i = 0; i < CONFIG_FB_OMAP2_NUM_FBS; i++) {
+		struct fb_info *fbi;
+		struct omapfb_info *ofbi;
+
+		fbi = framebuffer_alloc(sizeof(struct omapfb_info),
+				fbdev->dev);
+
+		if (fbi == NULL) {
+			dev_err(fbdev->dev,
+				"unable to allocate memory for plane info\n");
+			return -ENOMEM;
+		}
+
+		clear_fb_info(fbi);
+
+		fbdev->fbs[i] = fbi;
+
+		ofbi = FB2OFB(fbi);
+		ofbi->fbdev = fbdev;
+		ofbi->id = i;
+
+		/* assign these early, so that fb alloc can use them */
+		ofbi->rotation_type = def_vrfb ? OMAP_DSS_ROT_VRFB :
+			OMAP_DSS_ROT_DMA;
+		ofbi->mirror = def_mirror;
+
+		fbdev->num_fbs++;
+	}
+
+	DBG("fb_infos allocated\n");
+
+	/* assign overlays for the fbs */
+	for (i = 0; i < min(fbdev->num_fbs, fbdev->num_overlays); i++) {
+		struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]);
+
+		ofbi->overlays[0] = fbdev->overlays[i];
+		ofbi->num_overlays = 1;
+	}
+
+	/* allocate fb memories */
+	r = omapfb_allocate_all_fbs(fbdev);
+	if (r) {
+		dev_err(fbdev->dev, "failed to allocate fbmem\n");
+		return r;
+	}
+
+	DBG("fbmems allocated\n");
+
+	/* setup fb_infos */
+	for (i = 0; i < fbdev->num_fbs; i++) {
+		r = omapfb_fb_init(fbdev, fbdev->fbs[i]);
+		if (r) {
+			dev_err(fbdev->dev, "failed to setup fb_info\n");
+			return r;
+		}
+	}
+
+	DBG("fb_infos initialized\n");
+
+	for (i = 0; i < fbdev->num_fbs; i++) {
+		r = register_framebuffer(fbdev->fbs[i]);
+		if (r != 0) {
+			dev_err(fbdev->dev,
+				"registering framebuffer %d failed\n", i);
+			return r;
+		}
+	}
+
+	DBG("framebuffers registered\n");
+
+	for (i = 0; i < fbdev->num_fbs; i++) {
+		r = omapfb_apply_changes(fbdev->fbs[i], 1);
+		if (r) {
+			dev_err(fbdev->dev, "failed to change mode\n");
+			return r;
+		}
+	}
+
+	DBG("create sysfs for fbs\n");
+	r = omapfb_create_sysfs(fbdev);
+	if (r) {
+		dev_err(fbdev->dev, "failed to create sysfs entries\n");
+		return r;
+	}
+
+	/* Enable fb0 */
+	if (fbdev->num_fbs > 0) {
+		struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[0]);
+
+		if (ofbi->num_overlays > 0) {
+			struct omap_overlay *ovl = ofbi->overlays[0];
+
+			r = omapfb_overlay_enable(ovl, 1);
+
+			if (r) {
+				dev_err(fbdev->dev,
+						"failed to enable overlay\n");
+				return r;
+			}
+		}
+	}
+
+	DBG("create_framebuffers done\n");
+
+	return 0;
+}
+
+static int omapfb_mode_to_timings(const char *mode_str,
+		struct omap_video_timings *timings, u8 *bpp)
+{
+	struct fb_info fbi;
+	struct fb_var_screeninfo var;
+	struct fb_ops fbops;
+	int r;
+
+#ifdef CONFIG_OMAP2_DSS_VENC
+	if (strcmp(mode_str, "pal") == 0) {
+		*timings = omap_dss_pal_timings;
+		*bpp = 0;
+		return 0;
+	} else if (strcmp(mode_str, "ntsc") == 0) {
+		*timings = omap_dss_ntsc_timings;
+		*bpp = 0;
+		return 0;
+	}
+#endif
+
+	/* this is quite a hack, but I wanted to use the modedb and for
+	 * that we need fb_info and var, so we create dummy ones */
+
+	memset(&fbi, 0, sizeof(fbi));
+	memset(&var, 0, sizeof(var));
+	memset(&fbops, 0, sizeof(fbops));
+	fbi.fbops = &fbops;
+
+	r = fb_find_mode(&var, &fbi, mode_str, NULL, 0, NULL, 24);
+
+	if (r != 0) {
+		timings->pixel_clock = PICOS2KHZ(var.pixclock);
+		timings->hfp = var.left_margin;
+		timings->hbp = var.right_margin;
+		timings->vfp = var.upper_margin;
+		timings->vbp = var.lower_margin;
+		timings->hsw = var.hsync_len;
+		timings->vsw = var.vsync_len;
+		timings->x_res = var.xres;
+		timings->y_res = var.yres;
+
+		switch (var.bits_per_pixel) {
+		case 16:
+			*bpp = 16;
+			break;
+		case 24:
+		case 32:
+		default:
+			*bpp = 24;
+			break;
+		}
+
+		return 0;
+	} else {
+		return -EINVAL;
+	}
+}
+
+static int omapfb_set_def_mode(struct omap_dss_device *display, char *mode_str)
+{
+	int r;
+	u8 bpp;
+	struct omap_video_timings timings;
+
+	r = omapfb_mode_to_timings(mode_str, &timings, &bpp);
+	if (r)
+		return r;
+
+	display->panel.recommended_bpp = bpp;
+
+	if (!display->check_timings || !display->set_timings)
+		return -EINVAL;
+
+	r = display->check_timings(display, &timings);
+	if (r)
+		return r;
+
+	display->set_timings(display, &timings);
+
+	return 0;
+}
+
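+/*
+ * def_mode is a comma separated list of <display>:<mode> entries. <mode>
+ * is either a modedb style string or, with VENC support, "pal"/"ntsc".
+ * Display names are board specific; an illustrative example:
+ *
+ *   omapfb.mode=lcd:800x480-24,tv:pal
+ */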
+static int omapfb_parse_def_modes(struct omapfb2_device *fbdev)
+{
+	char *str, *options, *this_opt;
+	int r = 0;
+
+	str = kmalloc(strlen(def_mode) + 1, GFP_KERNEL);
+	if (!str)
+		return -ENOMEM;
+	strcpy(str, def_mode);
+	options = str;
+
+	while (!r && (this_opt = strsep(&options, ",")) != NULL) {
+		char *p, *display_str, *mode_str;
+		struct omap_dss_device *display;
+		int i;
+
+		p = strchr(this_opt, ':');
+		if (!p) {
+			r = -EINVAL;
+			break;
+		}
+
+		*p = 0;
+		display_str = this_opt;
+		mode_str = p + 1;
+
+		display = NULL;
+		for (i = 0; i < fbdev->num_displays; ++i) {
+			if (strcmp(fbdev->displays[i]->name,
+						display_str) == 0) {
+				display = fbdev->displays[i];
+				break;
+			}
+		}
+
+		if (!display) {
+			r = -EINVAL;
+			break;
+		}
+
+		r = omapfb_set_def_mode(display, mode_str);
+		if (r)
+			break;
+	}
+
+	kfree(str);
+
+	return r;
+}
+
+static int omapfb_probe(struct platform_device *pdev)
+{
+	struct omapfb2_device *fbdev = NULL;
+	int r = 0;
+	int i;
+	struct omap_overlay *ovl;
+	struct omap_dss_device *def_display;
+	struct omap_dss_device *dssdev;
+
+	DBG("omapfb_probe\n");
+
+	if (pdev->num_resources != 0) {
+		dev_err(&pdev->dev, "probed for an unknown device\n");
+		r = -ENODEV;
+		goto err0;
+	}
+
+	fbdev = kzalloc(sizeof(struct omapfb2_device), GFP_KERNEL);
+	if (fbdev == NULL) {
+		r = -ENOMEM;
+		goto err0;
+	}
+
+	mutex_init(&fbdev->mtx);
+
+	fbdev->dev = &pdev->dev;
+	platform_set_drvdata(pdev, fbdev);
+
+	fbdev->num_displays = 0;
+	dssdev = NULL;
+	for_each_dss_dev(dssdev) {
+		omap_dss_get_device(dssdev);
+		fbdev->displays[fbdev->num_displays++] = dssdev;
+	}
+
+	if (fbdev->num_displays == 0) {
+		dev_err(&pdev->dev, "no displays\n");
+		r = -EINVAL;
+		goto cleanup;
+	}
+
+	fbdev->num_overlays = omap_dss_get_num_overlays();
+	for (i = 0; i < fbdev->num_overlays; i++)
+		fbdev->overlays[i] = omap_dss_get_overlay(i);
+
+	fbdev->num_managers = omap_dss_get_num_overlay_managers();
+	for (i = 0; i < fbdev->num_managers; i++)
+		fbdev->managers[i] = omap_dss_get_overlay_manager(i);
+
+	if (def_mode && strlen(def_mode) > 0) {
+		if (omapfb_parse_def_modes(fbdev))
+			dev_warn(&pdev->dev, "cannot parse default modes\n");
+	}
+
+	r = omapfb_create_framebuffers(fbdev);
+	if (r)
+		goto cleanup;
+
+	for (i = 0; i < fbdev->num_managers; i++) {
+		struct omap_overlay_manager *mgr;
+		mgr = fbdev->managers[i];
+		r = mgr->apply(mgr);
+		if (r)
+			dev_warn(fbdev->dev, "failed to apply dispc config\n");
+	}
+
+	DBG("mgr->apply'ed\n");
+
+	/* gfx overlay should be the default one. Find a display
+	 * connected to it, and use it as the default display */
+	ovl = omap_dss_get_overlay(0);
+	if (ovl->manager && ovl->manager->device) {
+		def_display = ovl->manager->device;
+	} else {
+		dev_warn(&pdev->dev, "cannot find default display\n");
+		def_display = NULL;
+	}
+
+	if (def_display) {
+#ifndef CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE
+		u16 w, h;
+#endif
+		r = def_display->enable(def_display);
+		if (r)
+			dev_warn(fbdev->dev, "Failed to enable display '%s'\n",
+					def_display->name);
+
+		/* set the update mode */
+		if (def_display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
+#ifdef CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE
+			if (def_display->enable_te)
+				def_display->enable_te(def_display, 1);
+			if (def_display->set_update_mode)
+				def_display->set_update_mode(def_display,
+						OMAP_DSS_UPDATE_AUTO);
+#else /* MANUAL_UPDATE */
+			if (def_display->enable_te)
+				def_display->enable_te(def_display, 0);
+			if (def_display->set_update_mode)
+				def_display->set_update_mode(def_display,
+						OMAP_DSS_UPDATE_MANUAL);
+
+			def_display->get_resolution(def_display, &w, &h);
+			def_display->update(def_display, 0, 0, w, h);
+#endif
+		} else {
+			if (def_display->set_update_mode)
+				def_display->set_update_mode(def_display,
+						OMAP_DSS_UPDATE_AUTO);
+		}
+	}
+
+	return 0;
+
+cleanup:
+	omapfb_free_resources(fbdev);
+err0:
+	dev_err(&pdev->dev, "failed to setup omapfb\n");
+	return r;
+}
+
+static int omapfb_remove(struct platform_device *pdev)
+{
+	struct omapfb2_device *fbdev = platform_get_drvdata(pdev);
+
+	/* FIXME: wait till completion of pending events */
+
+	omapfb_remove_sysfs(fbdev);
+
+	omapfb_free_resources(fbdev);
+
+	return 0;
+}
+
+static struct platform_driver omapfb_driver = {
+	.probe          = omapfb_probe,
+	.remove         = omapfb_remove,
+	.driver         = {
+		.name   = "omapfb",
+		.owner  = THIS_MODULE,
+	},
+};
+
+static int __init omapfb_init(void)
+{
+	DBG("omapfb_init\n");
+
+	if (platform_driver_register(&omapfb_driver)) {
+		printk(KERN_ERR "failed to register omapfb driver\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void __exit omapfb_exit(void)
+{
+	DBG("omapfb_exit\n");
+	platform_driver_unregister(&omapfb_driver);
+}
+
+module_param_named(mode, def_mode, charp, 0);
+module_param_named(vram, def_vram, charp, 0);
+module_param_named(rotate, def_rotate, int, 0);
+module_param_named(vrfb, def_vrfb, bool, 0);
+module_param_named(mirror, def_mirror, bool, 0);
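+
+/*
+ * When built in, the options above can likewise be given on the kernel
+ * command line with an "omapfb." prefix, e.g. (illustrative values):
+ *
+ *   omapfb.vram=4M,4M omapfb.rotate=1 omapfb.vrfb=1
+ */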
+
+/* late_initcall so that panel/ctrl drivers are loaded first.
+ * A better option would be a more dynamic approach, so that
+ * omapfb reacts to new panels when they are loaded. */
+late_initcall(omapfb_init);
+/*module_init(omapfb_init);*/
+module_exit(omapfb_exit);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>");
+MODULE_DESCRIPTION("OMAP2/3 Framebuffer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/omap2/omapfb/omapfb-sysfs.c b/drivers/video/omap2/omapfb/omapfb-sysfs.c
new file mode 100644
index 0000000..62bb88f
--- /dev/null
+++ b/drivers/video/omap2/omapfb/omapfb-sysfs.c
@@ -0,0 +1,507 @@
+/*
+ * linux/drivers/video/omap2/omapfb/omapfb-sysfs.c
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/fb.h>
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/omapfb.h>
+
+#include <plat/display.h>
+#include <plat/vrfb.h>
+
+#include "omapfb.h"
+
+static ssize_t show_rotate_type(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ofbi->rotation_type);
+}
+
+static ssize_t store_rotate_type(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	enum omap_dss_rotation_type rot_type;
+	int r;
+
+	rot_type = simple_strtoul(buf, NULL, 0);
+
+	if (rot_type != OMAP_DSS_ROT_DMA && rot_type != OMAP_DSS_ROT_VRFB)
+		return -EINVAL;
+
+	lock_fb_info(fbi);
+
+	r = 0;
+	if (rot_type == ofbi->rotation_type)
+		goto out;
+
+	if (ofbi->region.size) {
+		r = -EBUSY;
+		goto out;
+	}
+
+	ofbi->rotation_type = rot_type;
+
+	/*
+	 * Since the VRAM for this FB is not allocated at the moment we don't
+	 * need to do any further parameter checking at this point.
+	 */
+out:
+	unlock_fb_info(fbi);
+
+	return r ? r : count;
+}
+
+
+static ssize_t show_mirror(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ofbi->mirror);
+}
+
+static ssize_t store_mirror(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	bool mirror;
+	int r;
+	struct fb_var_screeninfo new_var;
+
+	mirror = simple_strtoul(buf, NULL, 0);
+
+	if (mirror != 0 && mirror != 1)
+		return -EINVAL;
+
+	lock_fb_info(fbi);
+
+	ofbi->mirror = mirror;
+
+	memcpy(&new_var, &fbi->var, sizeof(new_var));
+	r = check_fb_var(fbi, &new_var);
+	if (r)
+		goto out;
+	memcpy(&fbi->var, &new_var, sizeof(fbi->var));
+
+	set_fb_fix(fbi);
+
+	r = omapfb_apply_changes(fbi, 0);
+	if (r)
+		goto out;
+
+	r = count;
+out:
+	unlock_fb_info(fbi);
+
+	return r;
+}
+
+static ssize_t show_overlays(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	struct omapfb2_device *fbdev = ofbi->fbdev;
+	ssize_t l = 0;
+	int t;
+
+	omapfb_lock(fbdev);
+	lock_fb_info(fbi);
+
+	for (t = 0; t < ofbi->num_overlays; t++) {
+		struct omap_overlay *ovl = ofbi->overlays[t];
+		int ovlnum;
+
+		for (ovlnum = 0; ovlnum < fbdev->num_overlays; ++ovlnum)
+			if (ovl == fbdev->overlays[ovlnum])
+				break;
+
+		l += snprintf(buf + l, PAGE_SIZE - l, "%s%d",
+				t == 0 ? "" : ",", ovlnum);
+	}
+
+	l += snprintf(buf + l, PAGE_SIZE - l, "\n");
+
+	unlock_fb_info(fbi);
+	omapfb_unlock(fbdev);
+
+	return l;
+}
+
+static struct omapfb_info *get_overlay_fb(struct omapfb2_device *fbdev,
+		struct omap_overlay *ovl)
+{
+	int i, t;
+
+	for (i = 0; i < fbdev->num_fbs; i++) {
+		struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]);
+
+		for (t = 0; t < ofbi->num_overlays; t++) {
+			if (ofbi->overlays[t] == ovl)
+				return ofbi;
+		}
+	}
+
+	return NULL;
+}
+
+static ssize_t store_overlays(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	struct omapfb2_device *fbdev = ofbi->fbdev;
+	struct omap_overlay *ovls[OMAPFB_MAX_OVL_PER_FB];
+	struct omap_overlay *ovl;
+	int num_ovls, r, i;
+	int len;
+	bool added = false;
+
+	num_ovls = 0;
+
+	len = strlen(buf);
+	if (len && buf[len - 1] == '\n')
+		len = len - 1;
+
+	omapfb_lock(fbdev);
+	lock_fb_info(fbi);
+
+	if (len > 0) {
+		char *p = (char *)buf;
+		int ovlnum;
+
+		while (p < buf + len) {
+			int found;
+			if (num_ovls == OMAPFB_MAX_OVL_PER_FB) {
+				r = -EINVAL;
+				goto out;
+			}
+
+			ovlnum = simple_strtoul(p, &p, 0);
+			if (ovlnum >= fbdev->num_overlays) {
+				r = -EINVAL;
+				goto out;
+			}
+
+			found = 0;
+			for (i = 0; i < num_ovls; ++i) {
+				if (ovls[i] == fbdev->overlays[ovlnum]) {
+					found = 1;
+					break;
+				}
+			}
+
+			if (!found)
+				ovls[num_ovls++] = fbdev->overlays[ovlnum];
+
+			p++;
+		}
+	}
+
+	for (i = 0; i < num_ovls; ++i) {
+		struct omapfb_info *ofbi2 = get_overlay_fb(fbdev, ovls[i]);
+		if (ofbi2 && ofbi2 != ofbi) {
+			dev_err(fbdev->dev, "overlay already in use\n");
+			r = -EINVAL;
+			goto out;
+		}
+	}
+
+	/* detach unused overlays */
+	for (i = 0; i < ofbi->num_overlays; ++i) {
+		int t, found;
+
+		ovl = ofbi->overlays[i];
+
+		found = 0;
+
+		for (t = 0; t < num_ovls; ++t) {
+			if (ovl == ovls[t]) {
+				found = 1;
+				break;
+			}
+		}
+
+		if (found)
+			continue;
+
+		DBG("detaching %d\n", ofbi->overlays[i]->id);
+
+		omapfb_overlay_enable(ovl, 0);
+
+		if (ovl->manager)
+			ovl->manager->apply(ovl->manager);
+
+		for (t = i + 1; t < ofbi->num_overlays; t++) {
+			ofbi->rotation[t-1] = ofbi->rotation[t];
+			ofbi->overlays[t-1] = ofbi->overlays[t];
+		}
+
+		ofbi->num_overlays--;
+		i--;
+	}
+
+	for (i = 0; i < num_ovls; ++i) {
+		int t, found;
+
+		ovl = ovls[i];
+
+		found = 0;
+
+		for (t = 0; t < ofbi->num_overlays; ++t) {
+			if (ovl == ofbi->overlays[t]) {
+				found = 1;
+				break;
+			}
+		}
+
+		if (found)
+			continue;
+		ofbi->rotation[ofbi->num_overlays] = 0;
+		ofbi->overlays[ofbi->num_overlays++] = ovl;
+
+		added = true;
+	}
+
+	if (added) {
+		r = omapfb_apply_changes(fbi, 0);
+		if (r)
+			goto out;
+	}
+
+	r = count;
+out:
+	unlock_fb_info(fbi);
+	omapfb_unlock(fbdev);
+
+	return r;
+}
+
+static ssize_t show_overlays_rotate(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	ssize_t l = 0;
+	int t;
+
+	lock_fb_info(fbi);
+
+	for (t = 0; t < ofbi->num_overlays; t++) {
+		l += snprintf(buf + l, PAGE_SIZE - l, "%s%d",
+				t == 0 ? "" : ",", ofbi->rotation[t]);
+	}
+
+	l += snprintf(buf + l, PAGE_SIZE - l, "\n");
+
+	unlock_fb_info(fbi);
+
+	return l;
+}
+
+static ssize_t store_overlays_rotate(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	int num_ovls = 0, r, i;
+	int len;
+	bool changed = false;
+	u8 rotation[OMAPFB_MAX_OVL_PER_FB];
+
+	len = strlen(buf);
+	if (len && buf[len - 1] == '\n')
+		len = len - 1;
+
+	lock_fb_info(fbi);
+
+	if (len > 0) {
+		char *p = (char *)buf;
+
+		while (p < buf + len) {
+			int rot;
+
+			if (num_ovls == ofbi->num_overlays) {
+				r = -EINVAL;
+				goto out;
+			}
+
+			rot = simple_strtoul(p, &p, 0);
+			if (rot < 0 || rot > 3) {
+				r = -EINVAL;
+				goto out;
+			}
+
+			if (ofbi->rotation[num_ovls] != rot)
+				changed = true;
+
+			rotation[num_ovls++] = rot;
+
+			p++;
+		}
+	}
+
+	if (num_ovls != ofbi->num_overlays) {
+		r = -EINVAL;
+		goto out;
+	}
+
+	if (changed) {
+		for (i = 0; i < num_ovls; ++i)
+			ofbi->rotation[i] = rotation[i];
+
+		r = omapfb_apply_changes(fbi, 0);
+		if (r)
+			goto out;
+
+		/* FIXME error handling? */
+	}
+
+	r = count;
+out:
+	unlock_fb_info(fbi);
+
+	return r;
+}
+
+static ssize_t show_size(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+
+	return snprintf(buf, PAGE_SIZE, "%lu\n", ofbi->region.size);
+}
+
+static ssize_t store_size(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	unsigned long size;
+	int r;
+	int i;
+
+	size = PAGE_ALIGN(simple_strtoul(buf, NULL, 0));
+
+	lock_fb_info(fbi);
+
+	for (i = 0; i < ofbi->num_overlays; i++) {
+		if (ofbi->overlays[i]->info.enabled) {
+			r = -EBUSY;
+			goto out;
+		}
+	}
+
+	if (size != ofbi->region.size) {
+		r = omapfb_realloc_fbmem(fbi, size, ofbi->region.type);
+		if (r) {
+			dev_err(dev, "realloc fbmem failed\n");
+			goto out;
+		}
+	}
+
+	r = count;
+out:
+	unlock_fb_info(fbi);
+
+	return r;
+}
+
+static ssize_t show_phys(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+
+	return snprintf(buf, PAGE_SIZE, "%0x\n", ofbi->region.paddr);
+}
+
+static ssize_t show_virt(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+
+	return snprintf(buf, PAGE_SIZE, "%p\n", ofbi->region.vaddr);
+}
+
+static struct device_attribute omapfb_attrs[] = {
+	__ATTR(rotate_type, S_IRUGO | S_IWUSR, show_rotate_type,
+			store_rotate_type),
+	__ATTR(mirror, S_IRUGO | S_IWUSR, show_mirror, store_mirror),
+	__ATTR(size, S_IRUGO | S_IWUSR, show_size, store_size),
+	__ATTR(overlays, S_IRUGO | S_IWUSR, show_overlays, store_overlays),
+	__ATTR(overlays_rotate, S_IRUGO | S_IWUSR, show_overlays_rotate,
+			store_overlays_rotate),
+	__ATTR(phys_addr, S_IRUGO, show_phys, NULL),
+	__ATTR(virt_addr, S_IRUGO, show_virt, NULL),
+};
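+
+/*
+ * These attributes appear in each framebuffer's sysfs directory
+ * (typically /sys/class/graphics/fbN). Illustrative shell usage:
+ *
+ *   # echo "0,1" > /sys/class/graphics/fb0/overlays
+ *   # cat /sys/class/graphics/fb0/phys_addr
+ */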
+
+int omapfb_create_sysfs(struct omapfb2_device *fbdev)
+{
+	int i;
+	int r;
+
+	DBG("create sysfs for fbs\n");
+	for (i = 0; i < fbdev->num_fbs; i++) {
+		int t;
+		for (t = 0; t < ARRAY_SIZE(omapfb_attrs); t++) {
+			r = device_create_file(fbdev->fbs[i]->dev,
+					&omapfb_attrs[t]);
+
+			if (r) {
+				dev_err(fbdev->dev, "failed to create sysfs "
+						"file\n");
+				return r;
+			}
+		}
+	}
+
+	return 0;
+}
+
+void omapfb_remove_sysfs(struct omapfb2_device *fbdev)
+{
+	int i, t;
+
+	DBG("remove sysfs for fbs\n");
+	for (i = 0; i < fbdev->num_fbs; i++) {
+		for (t = 0; t < ARRAY_SIZE(omapfb_attrs); t++)
+			device_remove_file(fbdev->fbs[i]->dev,
+					&omapfb_attrs[t]);
+	}
+}
+
diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h
new file mode 100644
index 0000000..f7c9c73
--- /dev/null
+++ b/drivers/video/omap2/omapfb/omapfb.h
@@ -0,0 +1,146 @@
+/*
+ * linux/drivers/video/omap2/omapfb/omapfb.h
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DRIVERS_VIDEO_OMAP2_OMAPFB_H__
+#define __DRIVERS_VIDEO_OMAP2_OMAPFB_H__
+
+#ifdef CONFIG_FB_OMAP2_DEBUG_SUPPORT
+#define DEBUG
+#endif
+
+#include <plat/display.h>
+
+#ifdef DEBUG
+extern unsigned int omapfb_debug;
+#define DBG(format, ...) \
+	if (omapfb_debug) \
+		printk(KERN_DEBUG "OMAPFB: " format, ## __VA_ARGS__)
+#else
+#define DBG(format, ...)
+#endif
+
+#define FB2OFB(fb_info) ((struct omapfb_info *)(fb_info->par))
+
+/* max number of overlays to which a framebuffer's data can be directed */
+#define OMAPFB_MAX_OVL_PER_FB 3
+
+struct omapfb2_mem_region {
+	u32		paddr;
+	void __iomem	*vaddr;
+	struct vrfb	vrfb;
+	unsigned long	size;
+	u8		type;		/* OMAPFB_PLANE_MEM_* */
+	bool		alloc;		/* allocated by the driver */
+	bool		map;		/* kernel mapped by the driver */
+};
+
+/* appended to fb_info */
+struct omapfb_info {
+	int id;
+	struct omapfb2_mem_region region;
+	atomic_t map_count;
+	int num_overlays;
+	struct omap_overlay *overlays[OMAPFB_MAX_OVL_PER_FB];
+	struct omapfb2_device *fbdev;
+	enum omap_dss_rotation_type rotation_type;
+	u8 rotation[OMAPFB_MAX_OVL_PER_FB];
+	bool mirror;
+};
+
+struct omapfb2_device {
+	struct device *dev;
+	struct mutex  mtx;
+
+	u32 pseudo_palette[17];
+
+	int state;
+
+	unsigned num_fbs;
+	struct fb_info *fbs[10];
+
+	unsigned num_displays;
+	struct omap_dss_device *displays[10];
+	unsigned num_overlays;
+	struct omap_overlay *overlays[10];
+	unsigned num_managers;
+	struct omap_overlay_manager *managers[10];
+};
+
+struct omapfb_colormode {
+	enum omap_color_mode dssmode;
+	u32 bits_per_pixel;
+	u32 nonstd;
+	struct fb_bitfield red;
+	struct fb_bitfield green;
+	struct fb_bitfield blue;
+	struct fb_bitfield transp;
+};
+
+void set_fb_fix(struct fb_info *fbi);
+int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var);
+int omapfb_realloc_fbmem(struct fb_info *fbi, unsigned long size, int type);
+int omapfb_apply_changes(struct fb_info *fbi, int init);
+
+int omapfb_create_sysfs(struct omapfb2_device *fbdev);
+void omapfb_remove_sysfs(struct omapfb2_device *fbdev);
+
+int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg);
+
+int dss_mode_to_fb_mode(enum omap_color_mode dssmode,
+			struct fb_var_screeninfo *var);
+
+/* find the display connected to this fb, if any */
+static inline struct omap_dss_device *fb2display(struct fb_info *fbi)
+{
+	struct omapfb_info *ofbi = FB2OFB(fbi);
+	int i;
+
+	/* XXX: returns the display connected to first attached overlay */
+	for (i = 0; i < ofbi->num_overlays; i++) {
+		if (ofbi->overlays[i]->manager)
+			return ofbi->overlays[i]->manager->device;
+	}
+
+	return NULL;
+}
+
+static inline void omapfb_lock(struct omapfb2_device *fbdev)
+{
+	mutex_lock(&fbdev->mtx);
+}
+
+static inline void omapfb_unlock(struct omapfb2_device *fbdev)
+{
+	mutex_unlock(&fbdev->mtx);
+}
+
+static inline int omapfb_overlay_enable(struct omap_overlay *ovl,
+		int enable)
+{
+	struct omap_overlay_info info;
+
+	ovl->get_overlay_info(ovl, &info);
+	info.enabled = enable;
+	return ovl->set_overlay_info(ovl, &info);
+}
+
+#endif
diff --git a/drivers/video/omap2/vram.c b/drivers/video/omap2/vram.c
new file mode 100644
index 0000000..55a4de5
--- /dev/null
+++ b/drivers/video/omap2/vram.c
@@ -0,0 +1,655 @@
+/*
+ * VRAM manager for OMAP
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+/*#define DEBUG*/
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include <linux/bootmem.h>
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+
+#include <asm/setup.h>
+
+#include <plat/sram.h>
+#include <plat/vram.h>
+#include <plat/dma.h>
+
+#ifdef DEBUG
+#define DBG(format, ...) pr_debug("VRAM: " format, ## __VA_ARGS__)
+#else
+#define DBG(format, ...)
+#endif
+
+#define OMAP2_SRAM_START		0x40200000
+/* Maximum size, in reality this is smaller if SRAM is partially locked. */
+#define OMAP2_SRAM_SIZE			0xa0000		/* 640k */
+
+/* postponed regions are used to temporarily store region information at boot
+ * time when we cannot yet allocate the region list */
+#define MAX_POSTPONED_REGIONS 10
+
+static bool vram_initialized;
+static int postponed_cnt;
+static struct {
+	unsigned long paddr;
+	size_t size;
+} postponed_regions[MAX_POSTPONED_REGIONS];
+
+struct vram_alloc {
+	struct list_head list;
+	unsigned long paddr;
+	unsigned pages;
+};
+
+struct vram_region {
+	struct list_head list;
+	struct list_head alloc_list;
+	unsigned long paddr;
+	unsigned pages;
+};
+
+static DEFINE_MUTEX(region_mutex);
+static LIST_HEAD(region_list);
+
+static inline int region_mem_type(unsigned long paddr)
+{
+	if (paddr >= OMAP2_SRAM_START &&
+	    paddr < OMAP2_SRAM_START + OMAP2_SRAM_SIZE)
+		return OMAP_VRAM_MEMTYPE_SRAM;
+	else
+		return OMAP_VRAM_MEMTYPE_SDRAM;
+}
+
+static struct vram_region *omap_vram_create_region(unsigned long paddr,
+		unsigned pages)
+{
+	struct vram_region *rm;
+
+	rm = kzalloc(sizeof(*rm), GFP_KERNEL);
+
+	if (rm) {
+		INIT_LIST_HEAD(&rm->alloc_list);
+		rm->paddr = paddr;
+		rm->pages = pages;
+	}
+
+	return rm;
+}
+
+#if 0
+static void omap_vram_free_region(struct vram_region *vr)
+{
+	list_del(&vr->list);
+	kfree(vr);
+}
+#endif
+
+static struct vram_alloc *omap_vram_create_allocation(struct vram_region *vr,
+		unsigned long paddr, unsigned pages)
+{
+	struct vram_alloc *va;
+	struct vram_alloc *new;
+
+	new = kzalloc(sizeof(*new), GFP_KERNEL);
+
+	if (!new)
+		return NULL;
+
+	new->paddr = paddr;
+	new->pages = pages;
+
+	list_for_each_entry(va, &vr->alloc_list, list) {
+		if (va->paddr > new->paddr)
+			break;
+	}
+
+	list_add_tail(&new->list, &va->list);
+
+	return new;
+}
+
+static void omap_vram_free_allocation(struct vram_alloc *va)
+{
+	list_del(&va->list);
+	kfree(va);
+}
+
+int omap_vram_add_region(unsigned long paddr, size_t size)
+{
+	struct vram_region *rm;
+	unsigned pages;
+
+	if (vram_initialized) {
+		DBG("adding region paddr %08lx size %d\n",
+				paddr, size);
+
+		size &= PAGE_MASK;
+		pages = size >> PAGE_SHIFT;
+
+		rm = omap_vram_create_region(paddr, pages);
+		if (rm == NULL)
+			return -ENOMEM;
+
+		list_add(&rm->list, &region_list);
+	} else {
+		if (postponed_cnt == MAX_POSTPONED_REGIONS)
+			return -ENOMEM;
+
+		postponed_regions[postponed_cnt].paddr = paddr;
+		postponed_regions[postponed_cnt].size = size;
+
+		++postponed_cnt;
+	}
+	return 0;
+}
+
+int omap_vram_free(unsigned long paddr, size_t size)
+{
+	struct vram_region *rm;
+	struct vram_alloc *alloc;
+	unsigned start, end;
+
+	DBG("free mem paddr %08lx size %d\n", paddr, size);
+
+	size = PAGE_ALIGN(size);
+
+	mutex_lock(&region_mutex);
+
+	list_for_each_entry(rm, &region_list, list) {
+		list_for_each_entry(alloc, &rm->alloc_list, list) {
+			start = alloc->paddr;
+			end = alloc->paddr + (alloc->pages << PAGE_SHIFT);
+
+			if (start >= paddr && end <= paddr + size)
+				goto found;
+		}
+	}
+
+	mutex_unlock(&region_mutex);
+	return -EINVAL;
+
+found:
+	omap_vram_free_allocation(alloc);
+
+	mutex_unlock(&region_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(omap_vram_free);
+
+static int _omap_vram_reserve(unsigned long paddr, unsigned pages)
+{
+	struct vram_region *rm;
+	struct vram_alloc *alloc;
+	size_t size;
+
+	size = pages << PAGE_SHIFT;
+
+	list_for_each_entry(rm, &region_list, list) {
+		unsigned long start, end;
+
+		DBG("checking region %lx %d\n", rm->paddr, rm->pages);
+
+		if (region_mem_type(rm->paddr) != region_mem_type(paddr))
+			continue;
+
+		start = rm->paddr;
+		end = start + (rm->pages << PAGE_SHIFT) - 1;
+		if (start > paddr || end < paddr + size - 1)
+			continue;
+
+		DBG("block ok, checking allocs\n");
+
+		list_for_each_entry(alloc, &rm->alloc_list, list) {
+			end = alloc->paddr - 1;
+
+			if (start <= paddr && end >= paddr + size - 1)
+				goto found;
+
+			start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
+		}
+
+		end = rm->paddr + (rm->pages << PAGE_SHIFT) - 1;
+
+		if (!(start <= paddr && end >= paddr + size - 1))
+			continue;
+found:
+		DBG("found area start %lx, end %lx\n", start, end);
+
+		if (omap_vram_create_allocation(rm, paddr, pages) == NULL)
+			return -ENOMEM;
+
+		return 0;
+	}
+
+	return -ENOMEM;
+}
+
+int omap_vram_reserve(unsigned long paddr, size_t size)
+{
+	unsigned pages;
+	int r;
+
+	DBG("reserve mem paddr %08lx size %d\n", paddr, size);
+
+	size = PAGE_ALIGN(size);
+	pages = size >> PAGE_SHIFT;
+
+	mutex_lock(&region_mutex);
+
+	r = _omap_vram_reserve(paddr, pages);
+
+	mutex_unlock(&region_mutex);
+
+	return r;
+}
+EXPORT_SYMBOL(omap_vram_reserve);
+
+static void _omap_vram_dma_cb(int lch, u16 ch_status, void *data)
+{
+	struct completion *compl = data;
+	complete(compl);
+}
+
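+/*
+ * Clear the given pages with a constant fill DMA transfer. Blocks until
+ * the transfer completes, or fails with -EIO after a one second timeout.
+ */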
+static int _omap_vram_clear(u32 paddr, unsigned pages)
+{
+	struct completion compl;
+	unsigned elem_count;
+	unsigned frame_count;
+	int r;
+	int lch;
+
+	init_completion(&compl);
+
+	r = omap_request_dma(OMAP_DMA_NO_DEVICE, "VRAM DMA",
+			_omap_vram_dma_cb,
+			&compl, &lch);
+	if (r) {
+		pr_err("VRAM: request_dma failed for memory clear\n");
+		return -EBUSY;
+	}
+
+	elem_count = pages * PAGE_SIZE / 4;
+	frame_count = 1;
+
+	omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S32,
+			elem_count, frame_count,
+			OMAP_DMA_SYNC_ELEMENT,
+			0, 0);
+
+	omap_set_dma_dest_params(lch, 0, OMAP_DMA_AMODE_POST_INC,
+			paddr, 0, 0);
+
+	omap_set_dma_color_mode(lch, OMAP_DMA_CONSTANT_FILL, 0x000000);
+
+	omap_start_dma(lch);
+
+	if (wait_for_completion_timeout(&compl, msecs_to_jiffies(1000)) == 0) {
+		omap_stop_dma(lch);
+		pr_err("VRAM: dma timeout while clearing memory\n");
+		r = -EIO;
+		goto err;
+	}
+
+	r = 0;
+err:
+	omap_free_dma(lch);
+
+	return r;
+}
+
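+/*
+ * First fit allocator: walk the regions of the requested memory type and
+ * take the first gap between existing allocations that is large enough.
+ * The new allocation is cleared with _omap_vram_clear().
+ */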
+static int _omap_vram_alloc(int mtype, unsigned pages, unsigned long *paddr)
+{
+	struct vram_region *rm;
+	struct vram_alloc *alloc;
+
+	list_for_each_entry(rm, &region_list, list) {
+		unsigned long start, end;
+
+		DBG("checking region %lx %d\n", rm->paddr, rm->pages);
+
+		if (region_mem_type(rm->paddr) != mtype)
+			continue;
+
+		start = rm->paddr;
+
+		list_for_each_entry(alloc, &rm->alloc_list, list) {
+			end = alloc->paddr;
+
+			if (end - start >= pages << PAGE_SHIFT)
+				goto found;
+
+			start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
+		}
+
+		end = rm->paddr + (rm->pages << PAGE_SHIFT);
+found:
+		if (end - start < pages << PAGE_SHIFT)
+			continue;
+
+		DBG("found %lx, end %lx\n", start, end);
+
+		alloc = omap_vram_create_allocation(rm, start, pages);
+		if (alloc == NULL)
+			return -ENOMEM;
+
+		*paddr = start;
+
+		_omap_vram_clear(start, pages);
+
+		return 0;
+	}
+
+	return -ENOMEM;
+}
+
+int omap_vram_alloc(int mtype, size_t size, unsigned long *paddr)
+{
+	unsigned pages;
+	int r;
+
+	BUG_ON(mtype > OMAP_VRAM_MEMTYPE_MAX || !size);
+
+	DBG("alloc mem type %d size %d\n", mtype, size);
+
+	size = PAGE_ALIGN(size);
+	pages = size >> PAGE_SHIFT;
+
+	mutex_lock(&region_mutex);
+
+	r = _omap_vram_alloc(mtype, pages, paddr);
+
+	mutex_unlock(&region_mutex);
+
+	return r;
+}
+EXPORT_SYMBOL(omap_vram_alloc);
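+
+/*
+ * Minimal usage sketch for a client driver (error handling and the
+ * mapping of the physical region are omitted):
+ *
+ *	unsigned long paddr;
+ *
+ *	if (omap_vram_alloc(OMAP_VRAM_MEMTYPE_SDRAM, size, &paddr) == 0) {
+ *		... ioremap and use the region ...
+ *		omap_vram_free(paddr, size);
+ *	}
+ */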
+
+void omap_vram_get_info(unsigned long *vram,
+		unsigned long *free_vram,
+		unsigned long *largest_free_block)
+{
+	struct vram_region *vr;
+	struct vram_alloc *va;
+
+	*vram = 0;
+	*free_vram = 0;
+	*largest_free_block = 0;
+
+	mutex_lock(&region_mutex);
+
+	list_for_each_entry(vr, &region_list, list) {
+		unsigned free;
+		unsigned long pa;
+
+		pa = vr->paddr;
+		*vram += vr->pages << PAGE_SHIFT;
+
+		list_for_each_entry(va, &vr->alloc_list, list) {
+			free = va->paddr - pa;
+			*free_vram += free;
+			if (free > *largest_free_block)
+				*largest_free_block = free;
+			pa = va->paddr + (va->pages << PAGE_SHIFT);
+		}
+
+		free = vr->paddr + (vr->pages << PAGE_SHIFT) - pa;
+		*free_vram += free;
+		if (free > *largest_free_block)
+			*largest_free_block = free;
+	}
+
+	mutex_unlock(&region_mutex);
+}
+EXPORT_SYMBOL(omap_vram_get_info);
+
+#if defined(CONFIG_DEBUG_FS)
+static int vram_debug_show(struct seq_file *s, void *unused)
+{
+	struct vram_region *vr;
+	struct vram_alloc *va;
+	unsigned size;
+
+	mutex_lock(&region_mutex);
+
+	list_for_each_entry(vr, &region_list, list) {
+		size = vr->pages << PAGE_SHIFT;
+		seq_printf(s, "%08lx-%08lx (%d bytes)\n",
+				vr->paddr, vr->paddr + size - 1,
+				size);
+
+		list_for_each_entry(va, &vr->alloc_list, list) {
+			size = va->pages << PAGE_SHIFT;
+			seq_printf(s, "    %08lx-%08lx (%d bytes)\n",
+					va->paddr, va->paddr + size - 1,
+					size);
+		}
+	}
+
+	mutex_unlock(&region_mutex);
+
+	return 0;
+}
+
+static int vram_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, vram_debug_show, inode->i_private);
+}
+
+static const struct file_operations vram_debug_fops = {
+	.open           = vram_debug_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = single_release,
+};
+
+static int __init omap_vram_create_debugfs(void)
+{
+	struct dentry *d;
+
+	d = debugfs_create_file("vram", S_IRUGO, NULL,
+			NULL, &vram_debug_fops);
+	if (IS_ERR(d))
+		return PTR_ERR(d);
+
+	return 0;
+}
+#endif
+
+static __init int omap_vram_init(void)
+{
+	int i;
+
+	vram_initialized = 1;
+
+	for (i = 0; i < postponed_cnt; i++)
+		omap_vram_add_region(postponed_regions[i].paddr,
+				postponed_regions[i].size);
+
+#ifdef CONFIG_DEBUG_FS
+	if (omap_vram_create_debugfs())
+		pr_err("VRAM: Failed to create debugfs file\n");
+#endif
+
+	return 0;
+}
+
+arch_initcall(omap_vram_init);
+
+/* boottime vram alloc stuff */
+
+/* set from board file */
+static u32 omap_vram_sram_start __initdata;
+static u32 omap_vram_sram_size __initdata;
+
+/* set from board file */
+static u32 omap_vram_sdram_start __initdata;
+static u32 omap_vram_sdram_size __initdata;
+
+/* set from kernel cmdline */
+static u32 omap_vram_def_sdram_size __initdata;
+static u32 omap_vram_def_sdram_start __initdata;
+
+static void __init omap_vram_early_vram(char **p)
+{
+	omap_vram_def_sdram_size = memparse(*p, p);
+	if (**p == ',')
+		omap_vram_def_sdram_start = simple_strtoul((*p) + 1, p, 16);
+}
+__early_param("vram=", omap_vram_early_vram);
+
+/*
+ * Called from map_io. We need to call to this early enough so that we
+ * can reserve the fixed SDRAM regions before VM could get hold of them.
+ */
+void __init omap_vram_reserve_sdram(void)
+{
+	struct bootmem_data	*bdata;
+	unsigned long		sdram_start, sdram_size;
+	u32 paddr;
+	u32 size = 0;
+
+	/* cmdline arg overrides the board file definition */
+	if (omap_vram_def_sdram_size) {
+		size = omap_vram_def_sdram_size;
+		paddr = omap_vram_def_sdram_start;
+	}
+
+	if (!size) {
+		size = omap_vram_sdram_size;
+		paddr = omap_vram_sdram_start;
+	}
+
+#ifdef CONFIG_OMAP2_VRAM_SIZE
+	if (!size) {
+		size = CONFIG_OMAP2_VRAM_SIZE * 1024 * 1024;
+		paddr = 0;
+	}
+#endif
+
+	if (!size)
+		return;
+
+	size = PAGE_ALIGN(size);
+
+	bdata = NODE_DATA(0)->bdata;
+	sdram_start = bdata->node_min_pfn << PAGE_SHIFT;
+	sdram_size = (bdata->node_low_pfn << PAGE_SHIFT) - sdram_start;
+
+	if (paddr) {
+		if ((paddr & ~PAGE_MASK) || paddr < sdram_start ||
+				paddr + size > sdram_start + sdram_size) {
+			pr_err("Illegal SDRAM region for VRAM\n");
+			return;
+		}
+
+		if (reserve_bootmem(paddr, size, BOOTMEM_EXCLUSIVE) < 0) {
+			pr_err("FB: failed to reserve VRAM\n");
+			return;
+		}
+	} else {
+		if (size > sdram_size) {
+			pr_err("Illegal SDRAM size for VRAM\n");
+			return;
+		}
+
+		paddr = virt_to_phys(alloc_bootmem_pages(size));
+		BUG_ON(paddr & ~PAGE_MASK);
+	}
+
+	omap_vram_add_region(paddr, size);
+
+	pr_info("Reserving %u bytes SDRAM for VRAM\n", size);
+}
+
+/*
+ * Called at sram init time, before anything is pushed to the SRAM stack.
+ * Because of the stack scheme, we will allocate everything from the
+ * start of the lowest address region to the end of SRAM. This will also
+ * include padding for page alignment and possible holes between regions.
+ *
+ * As opposed to the SDRAM case, we'll also do any dynamic allocations at
+ * this point, since a driver built as a module would have problems with
+ * freeing / reallocating the regions.
+ */
+unsigned long __init omap_vram_reserve_sram(unsigned long sram_pstart,
+				  unsigned long sram_vstart,
+				  unsigned long sram_size,
+				  unsigned long pstart_avail,
+				  unsigned long size_avail)
+{
+	unsigned long			pend_avail;
+	unsigned long			reserved;
+	u32 paddr;
+	u32 size;
+
+	paddr = omap_vram_sram_start;
+	size = omap_vram_sram_size;
+
+	if (!size)
+		return 0;
+
+	reserved = 0;
+	pend_avail = pstart_avail + size_avail;
+
+	if (!paddr) {
+		/* Dynamic allocation */
+		if ((size_avail & PAGE_MASK) < size) {
+			pr_err("Not enough SRAM for VRAM\n");
+			return 0;
+		}
+		size_avail = (size_avail - size) & PAGE_MASK;
+		paddr = pstart_avail + size_avail;
+	}
+
+	if (paddr < sram_pstart ||
+			paddr + size > sram_pstart + sram_size) {
+		pr_err("Illegal SRAM region for VRAM\n");
+		return 0;
+	}
+
+	/* Reserve everything above the start of the region. */
+	if (pend_avail - paddr > reserved)
+		reserved = pend_avail - paddr;
+	size_avail = pend_avail - reserved - pstart_avail;
+
+	omap_vram_add_region(paddr, size);
+
+	if (reserved)
+		pr_info("Reserving %lu bytes SRAM for VRAM\n", reserved);
+
+	return reserved;
+}
+
+void __init omap_vram_set_sdram_vram(u32 size, u32 start)
+{
+	omap_vram_sdram_start = start;
+	omap_vram_sdram_size = size;
+}
+
+void __init omap_vram_set_sram_vram(u32 size, u32 start)
+{
+	omap_vram_sram_start = start;
+	omap_vram_sram_size = size;
+}
diff --git a/drivers/video/omap2/vrfb.c b/drivers/video/omap2/vrfb.c
new file mode 100644
index 0000000..fd22716
--- /dev/null
+++ b/drivers/video/omap2/vrfb.c
@@ -0,0 +1,315 @@
+/*
+ * VRFB Rotation Engine
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+/*#define DEBUG*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+
+#include <mach/io.h>
+#include <plat/vrfb.h>
+#include <plat/sdrc.h>
+
+#ifdef DEBUG
+#define DBG(format, ...) pr_debug("VRFB: " format, ## __VA_ARGS__)
+#else
+#define DBG(format, ...)
+#endif
+
+#define SMS_ROT_VIRT_BASE(context, rot) \
+	(((context >= 4) ? 0xD0000000 : 0x70000000) \
+	 + (0x4000000 * (context)) \
+	 + (0x1000000 * (rot)))
+
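+/*
+ * Each context thus occupies a 64 MB (0x4000000) slice of the SMS
+ * rotation address space with a 16 MB (0x1000000) window per rotation
+ * angle; contexts 4..11 live in the higher, 0xD0000000 based range.
+ */
+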
+#define OMAP_VRFB_SIZE			(2048 * 2048 * 4)
+
+#define VRFB_PAGE_WIDTH_EXP	5 /* Assuming SDRAM page size = 1024 */
+#define VRFB_PAGE_HEIGHT_EXP	5 /* 1024 = 2^5 * 2^5 */
+#define VRFB_PAGE_WIDTH		(1 << VRFB_PAGE_WIDTH_EXP)
+#define VRFB_PAGE_HEIGHT	(1 << VRFB_PAGE_HEIGHT_EXP)
+#define SMS_IMAGEHEIGHT_OFFSET	16
+#define SMS_IMAGEWIDTH_OFFSET	0
+#define SMS_PH_OFFSET		8
+#define SMS_PW_OFFSET		4
+#define SMS_PS_OFFSET		0
+
+#define VRFB_NUM_CTXS 12
+/* bitmap of reserved contexts */
+static unsigned long ctx_map;
+
+static DEFINE_MUTEX(ctx_lock);
+
+/*
+ * Access to this happens from client drivers or the PM core after wake-up.
+ * For the first case we require locking at the driver level, for the second
+ * we don't need locking, since no drivers will run until after the wake-up
+ * has finished.
+ */
+static struct {
+	u32 physical_ba;
+	u32 control;
+	u32 size;
+} vrfb_hw_context[VRFB_NUM_CTXS];
+
+static inline void restore_hw_context(int ctx)
+{
+	omap2_sms_write_rot_control(vrfb_hw_context[ctx].control, ctx);
+	omap2_sms_write_rot_size(vrfb_hw_context[ctx].size, ctx);
+	omap2_sms_write_rot_physical_ba(vrfb_hw_context[ctx].physical_ba, ctx);
+}
+
+static u32 get_image_width_roundup(u16 width, u8 bytespp)
+{
+	unsigned long stride = width * bytespp;
+	unsigned long ceil_pages_per_stride = (stride / VRFB_PAGE_WIDTH) +
+		(stride % VRFB_PAGE_WIDTH != 0);
+
+	return ceil_pages_per_stride * VRFB_PAGE_WIDTH / bytespp;
+}
+
+/*
+ * This the extra space needed in the VRFB physical area for VRFB to safely wrap
+ * any memory accesses to the invisible part of the virtual view to the physical
+ * area.
+ */
+static inline u32 get_extra_physical_size(u16 image_width_roundup, u8 bytespp)
+{
+	return (OMAP_VRFB_LINE_LEN - image_width_roundup) * VRFB_PAGE_HEIGHT *
+		bytespp;
+}
+
+void omap_vrfb_restore_context(void)
+{
+	int i;
+	unsigned long map = ctx_map;
+
+	for (i = ffs(map); i; i = ffs(map)) {
+		/* i=1..32 */
+		i--;
+		map &= ~(1 << i);
+		restore_hw_context(i);
+	}
+}
+
+void omap_vrfb_adjust_size(u16 *width, u16 *height,
+		u8 bytespp)
+{
+	*width = ALIGN(*width * bytespp, VRFB_PAGE_WIDTH) / bytespp;
+	*height = ALIGN(*height, VRFB_PAGE_HEIGHT);
+}
+EXPORT_SYMBOL(omap_vrfb_adjust_size);
+
+u32 omap_vrfb_min_phys_size(u16 width, u16 height, u8 bytespp)
+{
+	unsigned long image_width_roundup = get_image_width_roundup(width,
+		bytespp);
+
+	if (image_width_roundup > OMAP_VRFB_LINE_LEN)
+		return 0;
+
+	return (width * height * bytespp) + get_extra_physical_size(
+		image_width_roundup, bytespp);
+}
+EXPORT_SYMBOL(omap_vrfb_min_phys_size);
+
+u16 omap_vrfb_max_height(u32 phys_size, u16 width, u8 bytespp)
+{
+	unsigned long image_width_roundup = get_image_width_roundup(width,
+		bytespp);
+	unsigned long height;
+	unsigned long extra;
+
+	if (image_width_roundup > OMAP_VRFB_LINE_LEN)
+		return 0;
+
+	extra = get_extra_physical_size(image_width_roundup, bytespp);
+
+	if (phys_size < extra)
+		return 0;
+
+	height = (phys_size - extra) / (width * bytespp);
+
+	/* Virtual views provided by VRFB are limited to 2048x2048. */
+	return min_t(unsigned long, height, 2048);
+}
+EXPORT_SYMBOL(omap_vrfb_max_height);
+
+void omap_vrfb_setup(struct vrfb *vrfb, unsigned long paddr,
+		u16 width, u16 height,
+		unsigned bytespp, bool yuv_mode)
+{
+	unsigned pixel_size_exp;
+	u16 vrfb_width;
+	u16 vrfb_height;
+	u8 ctx = vrfb->context;
+	u32 size;
+	u32 control;
+
+	DBG("omapfb_set_vrfb(%d, %lx, %dx%d, %d, %d)\n", ctx, paddr,
+			width, height, bytespp, yuv_mode);
+
+	/* For YUV2 and UYVY modes VRFB needs to handle pixels a bit
+	 * differently. See TRM. */
+	if (yuv_mode) {
+		bytespp *= 2;
+		width /= 2;
+	}
+
+	if (bytespp == 4)
+		pixel_size_exp = 2;
+	else if (bytespp == 2)
+		pixel_size_exp = 1;
+	else
+		BUG();
+
+	vrfb_width = ALIGN(width * bytespp, VRFB_PAGE_WIDTH) / bytespp;
+	vrfb_height = ALIGN(height, VRFB_PAGE_HEIGHT);
+
+	DBG("vrfb w %u, h %u bytespp %d\n", vrfb_width, vrfb_height, bytespp);
+
+	size  = vrfb_width << SMS_IMAGEWIDTH_OFFSET;
+	size |= vrfb_height << SMS_IMAGEHEIGHT_OFFSET;
+
+	control  = pixel_size_exp << SMS_PS_OFFSET;
+	control |= VRFB_PAGE_WIDTH_EXP  << SMS_PW_OFFSET;
+	control |= VRFB_PAGE_HEIGHT_EXP << SMS_PH_OFFSET;
+
+	vrfb_hw_context[ctx].physical_ba = paddr;
+	vrfb_hw_context[ctx].size = size;
+	vrfb_hw_context[ctx].control = control;
+
+	omap2_sms_write_rot_physical_ba(paddr, ctx);
+	omap2_sms_write_rot_size(size, ctx);
+	omap2_sms_write_rot_control(control, ctx);
+
+	DBG("vrfb offset pixels %d, %d\n",
+			vrfb_width - width, vrfb_height - height);
+
+	vrfb->xres = width;
+	vrfb->yres = height;
+	vrfb->xoffset = vrfb_width - width;
+	vrfb->yoffset = vrfb_height - height;
+	vrfb->bytespp = bytespp;
+	vrfb->yuv_mode = yuv_mode;
+}
+EXPORT_SYMBOL(omap_vrfb_setup);
+
+int omap_vrfb_map_angle(struct vrfb *vrfb, u16 height, u8 rot)
+{
+	unsigned long size = height * OMAP_VRFB_LINE_LEN * vrfb->bytespp;
+
+	vrfb->vaddr[rot] = ioremap_wc(vrfb->paddr[rot], size);
+
+	if (!vrfb->vaddr[rot]) {
+		printk(KERN_ERR "vrfb: ioremap failed\n");
+		return -ENOMEM;
+	}
+
+	DBG("ioremapped vrfb area %d of size %lu into %p\n", rot, size,
+		vrfb->vaddr[rot]);
+
+	return 0;
+}
+EXPORT_SYMBOL(omap_vrfb_map_angle);
+
+void omap_vrfb_release_ctx(struct vrfb *vrfb)
+{
+	int rot;
+	int ctx = vrfb->context;
+
+	if (ctx == 0xff)
+		return;
+
+	DBG("release ctx %d\n", ctx);
+
+	mutex_lock(&ctx_lock);
+
+	BUG_ON(!(ctx_map & (1 << ctx)));
+
+	clear_bit(ctx, &ctx_map);
+
+	for (rot = 0; rot < 4; ++rot) {
+		if (vrfb->paddr[rot]) {
+			release_mem_region(vrfb->paddr[rot], OMAP_VRFB_SIZE);
+			vrfb->paddr[rot] = 0;
+		}
+	}
+
+	vrfb->context = 0xff;
+
+	mutex_unlock(&ctx_lock);
+}
+EXPORT_SYMBOL(omap_vrfb_release_ctx);
+
+int omap_vrfb_request_ctx(struct vrfb *vrfb)
+{
+	int rot;
+	u32 paddr;
+	u8 ctx;
+	int r;
+
+	DBG("request ctx\n");
+
+	mutex_lock(&ctx_lock);
+
+	for (ctx = 0; ctx < VRFB_NUM_CTXS; ++ctx)
+		if ((ctx_map & (1 << ctx)) == 0)
+			break;
+
+	if (ctx == VRFB_NUM_CTXS) {
+		pr_err("vrfb: no free contexts\n");
+		r = -EBUSY;
+		goto out;
+	}
+
+	DBG("found free ctx %d\n", ctx);
+
+	set_bit(ctx, &ctx_map);
+
+	memset(vrfb, 0, sizeof(*vrfb));
+
+	vrfb->context = ctx;
+
+	for (rot = 0; rot < 4; ++rot) {
+		paddr = SMS_ROT_VIRT_BASE(ctx, rot);
+		if (!request_mem_region(paddr, OMAP_VRFB_SIZE, "vrfb")) {
+			pr_err("vrfb: failed to reserve VRFB "
+					"area for ctx %d, rotation %d\n",
+					ctx, rot * 90);
+			omap_vrfb_release_ctx(vrfb);
+			r = -ENOMEM;
+			goto out;
+		}
+
+		vrfb->paddr[rot] = paddr;
+
+		DBG("VRFB %d/%d: %lx\n", ctx, rot*90, vrfb->paddr[rot]);
+	}
+
+	r = 0;
+out:
+	mutex_unlock(&ctx_lock);
+	return r;
+}
+EXPORT_SYMBOL(omap_vrfb_request_ctx);
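+
+/*
+ * A typical client sequence (sketch only, error handling omitted):
+ *
+ *	struct vrfb vrfb;
+ *
+ *	omap_vrfb_request_ctx(&vrfb);
+ *	omap_vrfb_setup(&vrfb, paddr, width, height, bytespp, false);
+ *	omap_vrfb_map_angle(&vrfb, height, 1);	(maps the 90 degree view)
+ *	...
+ *	omap_vrfb_release_ctx(&vrfb);
+ */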
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index 91a68e9..603598f 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -25,7 +25,10 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
+
 #include <asm/xen/hypervisor.h>
+
+#include <xen/xen.h>
 #include <xen/events.h>
 #include <xen/page.h>
 #include <xen/interface/io/fbif.h>
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 4204336..f6738d8 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -52,6 +52,8 @@
 
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
+
+#include <xen/xen.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/memory.h>
 #include <xen/xenbus.h>
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index 0f765a9..14e2d99 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -1,5 +1,6 @@
 #include <linux/notifier.h>
 
+#include <xen/xen.h>
 #include <xen/xenbus.h>
 
 #include <asm/xen/hypervisor.h>
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 79bedba..f70a4f4 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -48,6 +48,8 @@
 #include <linux/gfp.h>
 #include <linux/mutex.h>
 #include <linux/cpu.h>
+
+#include <xen/xen.h>
 #include <xen/events.h>
 #include <xen/evtchn.h>
 #include <asm/xen/hypervisor.h>
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 7d8f531..4c6c0bd 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -37,6 +37,7 @@
 #include <linux/vmalloc.h>
 #include <linux/uaccess.h>
 
+#include <xen/xen.h>
 #include <xen/interface/xen.h>
 #include <xen/page.h>
 #include <xen/grant_table.h>
diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
index 88a60e0..ae5cb05 100644
--- a/drivers/xen/sys-hypervisor.c
+++ b/drivers/xen/sys-hypervisor.c
@@ -14,6 +14,7 @@
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
 
+#include <xen/xen.h>
 #include <xen/xenbus.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/version.h>
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 649fcdf..2f7aaa9 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -49,6 +49,8 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/xen/hypervisor.h>
+
+#include <xen/xen.h>
 #include <xen/xenbus.h>
 #include <xen/events.h>
 #include <xen/page.h>
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index 6559e0c..8924d93 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -13,6 +13,8 @@
 #include <linux/fs.h>
 #include <linux/magic.h>
 
+#include <xen/xen.h>
+
 #include "xenfs.h"
 
 #include <asm/xen/hypervisor.h>
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
index d69e6ae..3f959f1 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -142,29 +142,75 @@
 	}
 }
 
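+/*
+ * nilfs_palloc_get_block - get and cache a metadata block
+ *
+ * A single buffer head is cached in @prev; when @blkoff matches the
+ * cached entry the buffer is returned without calling into the MDT
+ * layer, otherwise the block is read and the cache is refreshed.
+ */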
+static int nilfs_palloc_get_block(struct inode *inode, unsigned long blkoff,
+				  int create,
+				  void (*init_block)(struct inode *,
+						     struct buffer_head *,
+						     void *),
+				  struct buffer_head **bhp,
+				  struct nilfs_bh_assoc *prev,
+				  spinlock_t *lock)
+{
+	int ret;
+
+	spin_lock(lock);
+	if (prev->bh && blkoff == prev->blkoff) {
+		get_bh(prev->bh);
+		*bhp = prev->bh;
+		spin_unlock(lock);
+		return 0;
+	}
+	spin_unlock(lock);
+
+	ret = nilfs_mdt_get_block(inode, blkoff, create, init_block, bhp);
+	if (!ret) {
+		spin_lock(lock);
+		/*
+		 * The cache contents may have changed while the block was
+		 * being read, so the code below must cope with a concurrent
+		 * update of the cache.
+		 */
+		brelse(prev->bh);
+		get_bh(*bhp);
+		prev->bh = *bhp;
+		prev->blkoff = blkoff;
+		spin_unlock(lock);
+	}
+	return ret;
+}
+
 static int nilfs_palloc_get_desc_block(struct inode *inode,
 				       unsigned long group,
 				       int create, struct buffer_head **bhp)
 {
-	return nilfs_mdt_get_block(inode,
-				   nilfs_palloc_desc_blkoff(inode, group),
-				   create, nilfs_palloc_desc_block_init, bhp);
+	struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
+
+	return nilfs_palloc_get_block(inode,
+				      nilfs_palloc_desc_blkoff(inode, group),
+				      create, nilfs_palloc_desc_block_init,
+				      bhp, &cache->prev_desc, &cache->lock);
 }
 
 static int nilfs_palloc_get_bitmap_block(struct inode *inode,
 					 unsigned long group,
 					 int create, struct buffer_head **bhp)
 {
-	return nilfs_mdt_get_block(inode,
-				   nilfs_palloc_bitmap_blkoff(inode, group),
-				   create, NULL, bhp);
+	struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
+
+	return nilfs_palloc_get_block(inode,
+				      nilfs_palloc_bitmap_blkoff(inode, group),
+				      create, NULL, bhp,
+				      &cache->prev_bitmap, &cache->lock);
 }
 
 int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr,
 				 int create, struct buffer_head **bhp)
 {
-	return nilfs_mdt_get_block(inode, nilfs_palloc_entry_blkoff(inode, nr),
-				   create, NULL, bhp);
+	struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
+
+	return nilfs_palloc_get_block(inode,
+				      nilfs_palloc_entry_blkoff(inode, nr),
+				      create, NULL, bhp,
+				      &cache->prev_entry, &cache->lock);
 }
 
 static struct nilfs_palloc_group_desc *
@@ -176,13 +222,6 @@
 		group % nilfs_palloc_groups_per_desc_block(inode);
 }
 
-static unsigned char *
-nilfs_palloc_block_get_bitmap(const struct inode *inode,
-			      const struct buffer_head *bh, void *kaddr)
-{
-	return (unsigned char *)(kaddr + bh_offset(bh));
-}
-
 void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr,
 				   const struct buffer_head *bh, void *kaddr)
 {
@@ -289,8 +328,7 @@
 				if (ret < 0)
 					goto out_desc;
 				bitmap_kaddr = kmap(bitmap_bh->b_page);
-				bitmap = nilfs_palloc_block_get_bitmap(
-					inode, bitmap_bh, bitmap_kaddr);
+				bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
 				pos = nilfs_palloc_find_available_slot(
 					inode, group, group_offset, bitmap,
 					entries_per_group);
@@ -351,8 +389,7 @@
 	desc = nilfs_palloc_block_get_group_desc(inode, group,
 						 req->pr_desc_bh, desc_kaddr);
 	bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page);
-	bitmap = nilfs_palloc_block_get_bitmap(inode, req->pr_bitmap_bh,
-					       bitmap_kaddr);
+	bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);
 
 	if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group),
 				    group_offset, bitmap))
@@ -385,8 +422,7 @@
 	desc = nilfs_palloc_block_get_group_desc(inode, group,
 						 req->pr_desc_bh, desc_kaddr);
 	bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page);
-	bitmap = nilfs_palloc_block_get_bitmap(inode, req->pr_bitmap_bh,
-					       bitmap_kaddr);
+	bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);
 	if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group),
 				    group_offset, bitmap))
 		printk(KERN_WARNING "%s: entry number %llu already freed\n",
@@ -472,8 +508,7 @@
 		desc = nilfs_palloc_block_get_group_desc(
 			inode, group, desc_bh, desc_kaddr);
 		bitmap_kaddr = kmap(bitmap_bh->b_page);
-		bitmap = nilfs_palloc_block_get_bitmap(
-			inode, bitmap_bh, bitmap_kaddr);
+		bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
 		for (j = i, n = 0;
 		     (j < nitems) && nilfs_palloc_group_is_in(inode, group,
 							      entry_nrs[j]);
@@ -502,3 +537,30 @@
 	}
 	return 0;
 }
+
+void nilfs_palloc_setup_cache(struct inode *inode,
+			      struct nilfs_palloc_cache *cache)
+{
+	NILFS_MDT(inode)->mi_palloc_cache = cache;
+	spin_lock_init(&cache->lock);
+}
+
+void nilfs_palloc_clear_cache(struct inode *inode)
+{
+	struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
+
+	spin_lock(&cache->lock);
+	brelse(cache->prev_desc.bh);
+	brelse(cache->prev_bitmap.bh);
+	brelse(cache->prev_entry.bh);
+	cache->prev_desc.bh = NULL;
+	cache->prev_bitmap.bh = NULL;
+	cache->prev_entry.bh = NULL;
+	spin_unlock(&cache->lock);
+}
+
+void nilfs_palloc_destroy_cache(struct inode *inode)
+{
+	nilfs_palloc_clear_cache(inode);
+	NILFS_MDT(inode)->mi_palloc_cache = NULL;
+}
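
The new nilfs_palloc_get_block() above puts a one-entry lookaside cache in
front of nilfs_mdt_get_block(), one slot each for the descriptor, bitmap and
entry blocks, so back-to-back allocator operations on the same group skip the
page-cache lookup.  A minimal userspace sketch of the same pattern follows;
the mutex stands in for the spinlock, the refcount for get_bh()/brelse(), and
every name in it is illustrative rather than a kernel API:

    #include <pthread.h>
    #include <stdlib.h>

    struct buf {
            unsigned long blkoff;
            int refs;
    };

    struct bh_assoc {                       /* mirrors struct nilfs_bh_assoc */
            unsigned long blkoff;
            struct buf *bh;
    };

    static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct bh_assoc prev;            /* last block handed out */

    static void put_buf(struct buf *b)      /* brelse() stand-in */
    {
            if (b && --b->refs == 0)
                    free(b);
    }

    /* slow path, standing in for nilfs_mdt_get_block() */
    static struct buf *slow_read(unsigned long blkoff)
    {
            struct buf *b = malloc(sizeof(*b));

            if (b) {
                    b->blkoff = blkoff;
                    b->refs = 1;            /* caller's reference */
            }
            return b;
    }

    static struct buf *get_block_cached(unsigned long blkoff)
    {
            struct buf *b;

            pthread_mutex_lock(&cache_lock);
            if (prev.bh && prev.blkoff == blkoff) {
                    b = prev.bh;
                    b->refs++;              /* get_bh() */
                    pthread_mutex_unlock(&cache_lock);
                    return b;               /* hit: no lookup, no I/O */
            }
            pthread_mutex_unlock(&cache_lock);

            b = slow_read(blkoff);          /* may block; lock is dropped */
            if (b) {
                    pthread_mutex_lock(&cache_lock);
                    /* the cache may have moved on while we slept; overwrite */
                    put_buf(prev.bh);
                    b->refs++;              /* reference owned by the cache */
                    prev.bh = b;
                    prev.blkoff = blkoff;
                    pthread_mutex_unlock(&cache_lock);
            }
            return b;
    }

Clearing the cache, as nilfs_palloc_clear_cache() does around the page-cache
manipulation in gcdat.c further down, is just put_buf(prev.bh) followed by
prev.bh = NULL for each slot, under the same lock.
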
diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h
index 4ace547..f4543ac 100644
--- a/fs/nilfs2/alloc.h
+++ b/fs/nilfs2/alloc.h
@@ -69,4 +69,25 @@
 #define nilfs_clear_bit_atomic		ext2_clear_bit_atomic
 #define nilfs_find_next_zero_bit	ext2_find_next_zero_bit
 
+/*
+ * persistent object allocator cache
+ */
+
+struct nilfs_bh_assoc {
+	unsigned long blkoff;
+	struct buffer_head *bh;
+};
+
+struct nilfs_palloc_cache {
+	spinlock_t lock;
+	struct nilfs_bh_assoc prev_desc;
+	struct nilfs_bh_assoc prev_bitmap;
+	struct nilfs_bh_assoc prev_entry;
+};
+
+void nilfs_palloc_setup_cache(struct inode *inode,
+			      struct nilfs_palloc_cache *cache);
+void nilfs_palloc_clear_cache(struct inode *inode);
+void nilfs_palloc_destroy_cache(struct inode *inode);
+
 #endif	/* _NILFS_ALLOC_H */
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index 08834df..f4a14ea 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -402,19 +402,11 @@
 void nilfs_bmap_add_blocks(const struct nilfs_bmap *bmap, int n)
 {
 	inode_add_bytes(bmap->b_inode, (1 << bmap->b_inode->i_blkbits) * n);
-	if (NILFS_MDT(bmap->b_inode))
-		nilfs_mdt_mark_dirty(bmap->b_inode);
-	else
-		mark_inode_dirty(bmap->b_inode);
 }
 
 void nilfs_bmap_sub_blocks(const struct nilfs_bmap *bmap, int n)
 {
 	inode_sub_bytes(bmap->b_inode, (1 << bmap->b_inode->i_blkbits) * n);
-	if (NILFS_MDT(bmap->b_inode))
-		nilfs_mdt_mark_dirty(bmap->b_inode);
-	else
-		mark_inode_dirty(bmap->b_inode);
 }
 
 __u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap,
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 84c2538..471e269 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -68,9 +68,34 @@
 	truncate_inode_pages(btnc, 0);
 }
 
+struct buffer_head *
+nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
+{
+	struct inode *inode = NILFS_BTNC_I(btnc);
+	struct buffer_head *bh;
+
+	bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node);
+	if (unlikely(!bh))
+		return NULL;
+
+	if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||
+		     buffer_dirty(bh))) {
+		brelse(bh);
+		BUG();
+	}
+	memset(bh->b_data, 0, 1 << inode->i_blkbits);
+	bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev;
+	bh->b_blocknr = blocknr;
+	set_buffer_mapped(bh);
+	set_buffer_uptodate(bh);
+
+	unlock_page(bh->b_page);
+	page_cache_release(bh->b_page);
+	return bh;
+}
+
 int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
-			      sector_t pblocknr, struct buffer_head **pbh,
-			      int newblk)
+			      sector_t pblocknr, struct buffer_head **pbh)
 {
 	struct buffer_head *bh;
 	struct inode *inode = NILFS_BTNC_I(btnc);
@@ -81,19 +106,6 @@
 		return -ENOMEM;
 
 	err = -EEXIST; /* internal code */
-	if (newblk) {
-		if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||
-			     buffer_dirty(bh))) {
-			brelse(bh);
-			BUG();
-		}
-		memset(bh->b_data, 0, 1 << inode->i_blkbits);
-		bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev;
-		bh->b_blocknr = blocknr;
-		set_buffer_mapped(bh);
-		set_buffer_uptodate(bh);
-		goto found;
-	}
 
 	if (buffer_uptodate(bh) || buffer_dirty(bh))
 		goto found;
@@ -135,27 +147,6 @@
 	return err;
 }
 
-int nilfs_btnode_get(struct address_space *btnc, __u64 blocknr,
-		     sector_t pblocknr, struct buffer_head **pbh, int newblk)
-{
-	struct buffer_head *bh;
-	int err;
-
-	err = nilfs_btnode_submit_block(btnc, blocknr, pblocknr, pbh, newblk);
-	if (err == -EEXIST) /* internal code (cache hit) */
-		return 0;
-	if (unlikely(err))
-		return err;
-
-	bh = *pbh;
-	wait_on_buffer(bh);
-	if (!buffer_uptodate(bh)) {
-		brelse(bh);
-		return -EIO;
-	}
-	return 0;
-}
-
 /**
  * nilfs_btnode_delete - delete B-tree node buffer
  * @bh: buffer to be deleted
@@ -244,12 +235,13 @@
 		unlock_page(obh->b_page);
 	}
 
-	err = nilfs_btnode_get(btnc, newkey, 0, &nbh, 1);
-	if (likely(!err)) {
-		BUG_ON(nbh == obh);
-		ctxt->newbh = nbh;
-	}
-	return err;
+	nbh = nilfs_btnode_create_block(btnc, newkey);
+	if (!nbh)
+		return -ENOMEM;
+
+	BUG_ON(nbh == obh);
+	ctxt->newbh = nbh;
+	return 0;
 
  failed_unlock:
 	unlock_page(obh->b_page);
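
nilfs_btnode_submit_block() keeps the convention of returning -EEXIST as an
internal "cache hit" code rather than a failure; callers translate it to
success and skip waiting for I/O, as the reworked caller in btree.c below
shows.  A sketch of that calling convention, with illustrative functions that
are not the kernel ones:

    #include <errno.h>

    /* submit side: -EEXIST means "already up to date, no I/O queued" */
    static int submit_block(int already_uptodate)
    {
            if (already_uptodate)
                    return -EEXIST;         /* internal code, not a failure */
            /* ...queue the read here... */
            return 0;
    }

    /* caller side: map the internal code back to success */
    static int read_block(int already_uptodate)
    {
            int err = submit_block(already_uptodate);

            if (err == -EEXIST)             /* cache hit */
                    return 0;
            if (err)
                    return err;
            /* ...wait for the read and verify the buffer is up to date... */
            return 0;
    }
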
diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h
index 3e22751..07da83f 100644
--- a/fs/nilfs2/btnode.h
+++ b/fs/nilfs2/btnode.h
@@ -40,10 +40,10 @@
 void nilfs_btnode_cache_init_once(struct address_space *);
 void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *);
 void nilfs_btnode_cache_clear(struct address_space *);
+struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
+					      __u64 blocknr);
 int nilfs_btnode_submit_block(struct address_space *, __u64, sector_t,
-			      struct buffer_head **, int);
-int nilfs_btnode_get(struct address_space *, __u64, sector_t,
-		     struct buffer_head **, int);
+			      struct buffer_head **);
 void nilfs_btnode_delete(struct buffer_head *);
 int nilfs_btnode_prepare_change_key(struct address_space *,
 				    struct nilfs_btnode_chkey_ctxt *);
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index e25b507..7cdd98b 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -114,7 +114,18 @@
 {
 	struct address_space *btnc =
 		&NILFS_BMAP_I((struct nilfs_bmap *)btree)->i_btnode_cache;
-	return nilfs_btnode_get(btnc, ptr, 0, bhp, 0);
+	int err;
+
+	err = nilfs_btnode_submit_block(btnc, ptr, 0, bhp);
+	if (err)
+		return err == -EEXIST ? 0 : err;
+
+	wait_on_buffer(*bhp);
+	if (!buffer_uptodate(*bhp)) {
+		brelse(*bhp);
+		return -EIO;
+	}
+	return 0;
 }
 
 static int nilfs_btree_get_new_block(const struct nilfs_btree *btree,
@@ -122,12 +133,15 @@
 {
 	struct address_space *btnc =
 		&NILFS_BMAP_I((struct nilfs_bmap *)btree)->i_btnode_cache;
-	int ret;
+	struct buffer_head *bh;
 
-	ret = nilfs_btnode_get(btnc, ptr, 0, bhp, 1);
-	if (!ret)
-		set_buffer_nilfs_volatile(*bhp);
-	return ret;
+	bh = nilfs_btnode_create_block(btnc, ptr);
+	if (!bh)
+		return -ENOMEM;
+
+	set_buffer_nilfs_volatile(bh);
+	*bhp = bh;
+	return 0;
 }
 
 static inline int
@@ -444,6 +458,18 @@
 		nilfs_btree_get_nonroot_node(path, level);
 }
 
+static inline int
+nilfs_btree_bad_node(struct nilfs_btree_node *node, int level)
+{
+	if (unlikely(nilfs_btree_node_get_level(node) != level)) {
+		dump_stack();
+		printk(KERN_CRIT "NILFS: btree level mismatch: %d != %d\n",
+		       nilfs_btree_node_get_level(node), level);
+		return 1;
+	}
+	return 0;
+}
+
 static int nilfs_btree_do_lookup(const struct nilfs_btree *btree,
 				 struct nilfs_btree_path *path,
 				 __u64 key, __u64 *ptrp, int minlevel)
@@ -467,7 +493,8 @@
 		if (ret < 0)
 			return ret;
 		node = nilfs_btree_get_nonroot_node(path, level);
-		BUG_ON(level != nilfs_btree_node_get_level(node));
+		if (nilfs_btree_bad_node(node, level))
+			return -EINVAL;
 		if (!found)
 			found = nilfs_btree_node_lookup(node, key, &index);
 		else
@@ -512,7 +539,8 @@
 		if (ret < 0)
 			return ret;
 		node = nilfs_btree_get_nonroot_node(path, level);
-		BUG_ON(level != nilfs_btree_node_get_level(node));
+		if (nilfs_btree_bad_node(node, level))
+			return -EINVAL;
 		index = nilfs_btree_node_get_nchildren(node) - 1;
 		ptr = nilfs_btree_node_get_ptr(btree, node, index);
 		path[level].bp_index = index;
@@ -638,13 +666,11 @@
 {
 	if (level < nilfs_btree_height(btree) - 1) {
 		do {
-			lock_buffer(path[level].bp_bh);
 			nilfs_btree_node_set_key(
 				nilfs_btree_get_nonroot_node(path, level),
 				path[level].bp_index, key);
 			if (!buffer_dirty(path[level].bp_bh))
 				nilfs_btnode_mark_dirty(path[level].bp_bh);
-			unlock_buffer(path[level].bp_bh);
 		} while ((path[level].bp_index == 0) &&
 			 (++level < nilfs_btree_height(btree) - 1));
 	}
@@ -663,13 +689,11 @@
 	struct nilfs_btree_node *node;
 
 	if (level < nilfs_btree_height(btree) - 1) {
-		lock_buffer(path[level].bp_bh);
 		node = nilfs_btree_get_nonroot_node(path, level);
 		nilfs_btree_node_insert(btree, node, *keyp, *ptrp,
 					path[level].bp_index);
 		if (!buffer_dirty(path[level].bp_bh))
 			nilfs_btnode_mark_dirty(path[level].bp_bh);
-		unlock_buffer(path[level].bp_bh);
 
 		if (path[level].bp_index == 0)
 			nilfs_btree_promote_key(btree, path, level + 1,
@@ -689,9 +713,6 @@
 	struct nilfs_btree_node *node, *left;
 	int nchildren, lnchildren, n, move;
 
-	lock_buffer(path[level].bp_bh);
-	lock_buffer(path[level].bp_sib_bh);
-
 	node = nilfs_btree_get_nonroot_node(path, level);
 	left = nilfs_btree_get_sib_node(path, level);
 	nchildren = nilfs_btree_node_get_nchildren(node);
@@ -712,9 +733,6 @@
 	if (!buffer_dirty(path[level].bp_sib_bh))
 		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
 
-	unlock_buffer(path[level].bp_bh);
-	unlock_buffer(path[level].bp_sib_bh);
-
 	nilfs_btree_promote_key(btree, path, level + 1,
 				nilfs_btree_node_get_key(node, 0));
 
@@ -740,9 +758,6 @@
 	struct nilfs_btree_node *node, *right;
 	int nchildren, rnchildren, n, move;
 
-	lock_buffer(path[level].bp_bh);
-	lock_buffer(path[level].bp_sib_bh);
-
 	node = nilfs_btree_get_nonroot_node(path, level);
 	right = nilfs_btree_get_sib_node(path, level);
 	nchildren = nilfs_btree_node_get_nchildren(node);
@@ -763,9 +778,6 @@
 	if (!buffer_dirty(path[level].bp_sib_bh))
 		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
 
-	unlock_buffer(path[level].bp_bh);
-	unlock_buffer(path[level].bp_sib_bh);
-
 	path[level + 1].bp_index++;
 	nilfs_btree_promote_key(btree, path, level + 1,
 				nilfs_btree_node_get_key(right, 0));
@@ -794,9 +806,6 @@
 	__u64 newptr;
 	int nchildren, n, move;
 
-	lock_buffer(path[level].bp_bh);
-	lock_buffer(path[level].bp_sib_bh);
-
 	node = nilfs_btree_get_nonroot_node(path, level);
 	right = nilfs_btree_get_sib_node(path, level);
 	nchildren = nilfs_btree_node_get_nchildren(node);
@@ -815,9 +824,6 @@
 	if (!buffer_dirty(path[level].bp_sib_bh))
 		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
 
-	unlock_buffer(path[level].bp_bh);
-	unlock_buffer(path[level].bp_sib_bh);
-
 	newkey = nilfs_btree_node_get_key(right, 0);
 	newptr = path[level].bp_newreq.bpr_ptr;
 
@@ -852,8 +858,6 @@
 	struct nilfs_btree_node *root, *child;
 	int n;
 
-	lock_buffer(path[level].bp_sib_bh);
-
 	root = nilfs_btree_get_root(btree);
 	child = nilfs_btree_get_sib_node(path, level);
 
@@ -865,8 +869,6 @@
 	if (!buffer_dirty(path[level].bp_sib_bh))
 		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
 
-	unlock_buffer(path[level].bp_sib_bh);
-
 	path[level].bp_bh = path[level].bp_sib_bh;
 	path[level].bp_sib_bh = NULL;
 
@@ -1023,11 +1025,9 @@
 
 		stats->bs_nblocks++;
 
-		lock_buffer(bh);
 		nilfs_btree_node_init(btree,
 				      (struct nilfs_btree_node *)bh->b_data,
 				      0, level, 0, NULL, NULL);
-		unlock_buffer(bh);
 		path[level].bp_sib_bh = bh;
 		path[level].bp_op = nilfs_btree_split;
 	}
@@ -1052,10 +1052,8 @@
 	if (ret < 0)
 		goto err_out_curr_node;
 
-	lock_buffer(bh);
 	nilfs_btree_node_init(btree, (struct nilfs_btree_node *)bh->b_data,
 			      0, level, 0, NULL, NULL);
-	unlock_buffer(bh);
 	path[level].bp_sib_bh = bh;
 	path[level].bp_op = nilfs_btree_grow;
 
@@ -1154,13 +1152,11 @@
 	struct nilfs_btree_node *node;
 
 	if (level < nilfs_btree_height(btree) - 1) {
-		lock_buffer(path[level].bp_bh);
 		node = nilfs_btree_get_nonroot_node(path, level);
 		nilfs_btree_node_delete(btree, node, keyp, ptrp,
 					path[level].bp_index);
 		if (!buffer_dirty(path[level].bp_bh))
 			nilfs_btnode_mark_dirty(path[level].bp_bh);
-		unlock_buffer(path[level].bp_bh);
 		if (path[level].bp_index == 0)
 			nilfs_btree_promote_key(btree, path, level + 1,
 				nilfs_btree_node_get_key(node, 0));
@@ -1180,9 +1176,6 @@
 
 	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
 
-	lock_buffer(path[level].bp_bh);
-	lock_buffer(path[level].bp_sib_bh);
-
 	node = nilfs_btree_get_nonroot_node(path, level);
 	left = nilfs_btree_get_sib_node(path, level);
 	nchildren = nilfs_btree_node_get_nchildren(node);
@@ -1197,9 +1190,6 @@
 	if (!buffer_dirty(path[level].bp_sib_bh))
 		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
 
-	unlock_buffer(path[level].bp_bh);
-	unlock_buffer(path[level].bp_sib_bh);
-
 	nilfs_btree_promote_key(btree, path, level + 1,
 				nilfs_btree_node_get_key(node, 0));
 
@@ -1217,9 +1207,6 @@
 
 	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
 
-	lock_buffer(path[level].bp_bh);
-	lock_buffer(path[level].bp_sib_bh);
-
 	node = nilfs_btree_get_nonroot_node(path, level);
 	right = nilfs_btree_get_sib_node(path, level);
 	nchildren = nilfs_btree_node_get_nchildren(node);
@@ -1234,9 +1221,6 @@
 	if (!buffer_dirty(path[level].bp_sib_bh))
 		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
 
-	unlock_buffer(path[level].bp_bh);
-	unlock_buffer(path[level].bp_sib_bh);
-
 	path[level + 1].bp_index++;
 	nilfs_btree_promote_key(btree, path, level + 1,
 				nilfs_btree_node_get_key(right, 0));
@@ -1255,9 +1239,6 @@
 
 	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
 
-	lock_buffer(path[level].bp_bh);
-	lock_buffer(path[level].bp_sib_bh);
-
 	node = nilfs_btree_get_nonroot_node(path, level);
 	left = nilfs_btree_get_sib_node(path, level);
 
@@ -1268,9 +1249,6 @@
 	if (!buffer_dirty(path[level].bp_sib_bh))
 		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
 
-	unlock_buffer(path[level].bp_bh);
-	unlock_buffer(path[level].bp_sib_bh);
-
 	nilfs_btnode_delete(path[level].bp_bh);
 	path[level].bp_bh = path[level].bp_sib_bh;
 	path[level].bp_sib_bh = NULL;
@@ -1286,9 +1264,6 @@
 
 	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
 
-	lock_buffer(path[level].bp_bh);
-	lock_buffer(path[level].bp_sib_bh);
-
 	node = nilfs_btree_get_nonroot_node(path, level);
 	right = nilfs_btree_get_sib_node(path, level);
 
@@ -1299,9 +1274,6 @@
 	if (!buffer_dirty(path[level].bp_bh))
 		nilfs_btnode_mark_dirty(path[level].bp_bh);
 
-	unlock_buffer(path[level].bp_bh);
-	unlock_buffer(path[level].bp_sib_bh);
-
 	nilfs_btnode_delete(path[level].bp_sib_bh);
 	path[level].bp_sib_bh = NULL;
 	path[level + 1].bp_index++;
@@ -1316,7 +1288,6 @@
 
 	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
 
-	lock_buffer(path[level].bp_bh);
 	root = nilfs_btree_get_root(btree);
 	child = nilfs_btree_get_nonroot_node(path, level);
 
@@ -1324,7 +1295,6 @@
 	nilfs_btree_node_set_level(root, level);
 	n = nilfs_btree_node_get_nchildren(child);
 	nilfs_btree_node_move_left(btree, root, child, n);
-	unlock_buffer(path[level].bp_bh);
 
 	nilfs_btnode_delete(path[level].bp_bh);
 	path[level].bp_bh = NULL;
@@ -1699,7 +1669,6 @@
 		nilfs_bmap_commit_alloc_ptr(bmap, nreq, dat);
 
 		/* create child node at level 1 */
-		lock_buffer(bh);
 		node = (struct nilfs_btree_node *)bh->b_data;
 		nilfs_btree_node_init(btree, node, 0, 1, n, keys, ptrs);
 		nilfs_btree_node_insert(btree, node,
@@ -1709,7 +1678,6 @@
 		if (!nilfs_bmap_dirty(bmap))
 			nilfs_bmap_set_dirty(bmap);
 
-		unlock_buffer(bh);
 		brelse(bh);
 
 		/* create root node at level 2 */
@@ -2050,7 +2018,7 @@
 	for (level = NILFS_BTREE_LEVEL_NODE_MIN;
 	     level < NILFS_BTREE_LEVEL_MAX;
 	     level++)
-		list_splice(&lists[level], listp->prev);
+		list_splice_tail(&lists[level], listp);
 }
 
 static int nilfs_btree_assign_p(struct nilfs_btree *btree,
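
The final hunk above swaps the open-coded list_splice(&lists[level],
listp->prev) for list_splice_tail(&lists[level], listp); splicing after
head->prev and splicing at the tail of head are the same operation, so this
is a readability change only (recovery.c and segment.c below get the same
conversion).  A self-contained sketch of why the two forms coincide, using a
hand-rolled circular list rather than the kernel's:

    struct list_head {
            struct list_head *next, *prev;
    };

    static void list_init(struct list_head *h)
    {
            h->next = h->prev = h;
    }

    /* link the chain first..last in between prev and next */
    static void splice_range(struct list_head *first, struct list_head *last,
                             struct list_head *prev, struct list_head *next)
    {
            first->prev = prev;
            prev->next = first;
            last->next = next;
            next->prev = last;
    }

    /* append @list before @head; identical to the old idiom of splicing
     * "after head->prev".  Like the kernel's non-_init variant, the source
     * head is left stale and must not be reused without reinitializing. */
    static void list_splice_tail(struct list_head *list, struct list_head *head)
    {
            if (list->next != list)
                    splice_range(list->next, list->prev, head->prev, head);
    }
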
diff --git a/fs/nilfs2/btree.h b/fs/nilfs2/btree.h
index 0e72bbb..4b82d84 100644
--- a/fs/nilfs2/btree.h
+++ b/fs/nilfs2/btree.h
@@ -34,28 +34,6 @@
 struct nilfs_btree_path;
 
 /**
- * struct nilfs_btree_node - B-tree node
- * @bn_flags: flags
- * @bn_level: level
- * @bn_nchildren: number of children
- * @bn_pad: padding
- */
-struct nilfs_btree_node {
-	__u8 bn_flags;
-	__u8 bn_level;
-	__le16 bn_nchildren;
-	__le32 bn_pad;
-};
-
-/* flags */
-#define NILFS_BTREE_NODE_ROOT	0x01
-
-/* level */
-#define NILFS_BTREE_LEVEL_DATA		0
-#define NILFS_BTREE_LEVEL_NODE_MIN	(NILFS_BTREE_LEVEL_DATA + 1)
-#define NILFS_BTREE_LEVEL_MAX		14
-
-/**
  * struct nilfs_btree - B-tree structure
  * @bt_bmap: bmap base structure
  */
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index 3f5d5d0..d5ad54e 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -926,3 +926,29 @@
 	up_read(&NILFS_MDT(cpfile)->mi_sem);
 	return ret;
 }
+
+/**
+ * nilfs_cpfile_read - read cpfile inode
+ * @cpfile: cpfile inode
+ * @raw_inode: on-disk cpfile inode
+ */
+int nilfs_cpfile_read(struct inode *cpfile, struct nilfs_inode *raw_inode)
+{
+	return nilfs_read_inode_common(cpfile, raw_inode);
+}
+
+/**
+ * nilfs_cpfile_new - create cpfile
+ * @nilfs: nilfs object
+ * @cpsize: size of a checkpoint entry
+ */
+struct inode *nilfs_cpfile_new(struct the_nilfs *nilfs, size_t cpsize)
+{
+	struct inode *cpfile;
+
+	cpfile = nilfs_mdt_new(nilfs, NULL, NILFS_CPFILE_INO, 0);
+	if (cpfile)
+		nilfs_mdt_set_entry_size(cpfile, cpsize,
+					 sizeof(struct nilfs_cpfile_header));
+	return cpfile;
+}
diff --git a/fs/nilfs2/cpfile.h b/fs/nilfs2/cpfile.h
index debea89..bc0809e 100644
--- a/fs/nilfs2/cpfile.h
+++ b/fs/nilfs2/cpfile.h
@@ -40,4 +40,7 @@
 ssize_t nilfs_cpfile_get_cpinfo(struct inode *, __u64 *, int, void *, unsigned,
 				size_t);
 
+int nilfs_cpfile_read(struct inode *cpfile, struct nilfs_inode *raw_inode);
+struct inode *nilfs_cpfile_new(struct the_nilfs *nilfs, size_t cpsize);
+
 #endif	/* _NILFS_CPFILE_H */
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index 1ff8e15..187dd07 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -33,6 +33,16 @@
 #define NILFS_CNO_MIN	((__u64)1)
 #define NILFS_CNO_MAX	(~(__u64)0)
 
+struct nilfs_dat_info {
+	struct nilfs_mdt_info mi;
+	struct nilfs_palloc_cache palloc_cache;
+};
+
+static inline struct nilfs_dat_info *NILFS_DAT_I(struct inode *dat)
+{
+	return (struct nilfs_dat_info *)NILFS_MDT(dat);
+}
+
 static int nilfs_dat_prepare_entry(struct inode *dat,
 				   struct nilfs_palloc_req *req, int create)
 {
@@ -425,3 +435,40 @@
 
 	return nvi;
 }
+
+/**
+ * nilfs_dat_read - read dat inode
+ * @dat: dat inode
+ * @raw_inode: on-disk dat inode
+ */
+int nilfs_dat_read(struct inode *dat, struct nilfs_inode *raw_inode)
+{
+	return nilfs_read_inode_common(dat, raw_inode);
+}
+
+/**
+ * nilfs_dat_new - create dat file
+ * @nilfs: nilfs object
+ * @entry_size: size of a dat entry
+ */
+struct inode *nilfs_dat_new(struct the_nilfs *nilfs, size_t entry_size)
+{
+	static struct lock_class_key dat_lock_key;
+	struct inode *dat;
+	struct nilfs_dat_info *di;
+	int err;
+
+	dat = nilfs_mdt_new(nilfs, NULL, NILFS_DAT_INO, sizeof(*di));
+	if (dat) {
+		err = nilfs_palloc_init_blockgroup(dat, entry_size);
+		if (unlikely(err)) {
+			nilfs_mdt_destroy(dat);
+			return NULL;
+		}
+
+		di = NILFS_DAT_I(dat);
+		lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
+		nilfs_palloc_setup_cache(dat, &di->palloc_cache);
+	}
+	return dat;
+}
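
struct nilfs_dat_info embeds struct nilfs_mdt_info as its first member, and
nilfs_mdt_new_common() (see the mdt.c hunks later in this series) now
allocates kzalloc(max(sizeof(*mi), objsz)), so the cast in NILFS_DAT_I() is a
valid base-to-derived downcast; ifile.c uses the identical scheme.  A minimal
sketch of the idiom, with illustrative names:

    #include <stdlib.h>

    struct base {                    /* plays nilfs_mdt_info */
            unsigned long entry_size;
    };

    struct derived {                 /* plays nilfs_dat_info */
            struct base mi;          /* must be the first member */
            int private_cache;       /* plays the palloc cache */
    };

    /* the allocator takes the size of the *derived* object (objsz) but
     * hands back the embedded base pointer, as nilfs_mdt_new() does */
    static struct base *base_new(size_t objsz)
    {
            size_t sz = objsz > sizeof(struct base) ? objsz
                                                    : sizeof(struct base);
            return calloc(1, sz);
    }

    static struct derived *to_derived(struct base *b)  /* NILFS_DAT_I() */
    {
            return (struct derived *)b;  /* valid: base is first member */
    }
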
diff --git a/fs/nilfs2/dat.h b/fs/nilfs2/dat.h
index 406070d..d31c3aa 100644
--- a/fs/nilfs2/dat.h
+++ b/fs/nilfs2/dat.h
@@ -53,4 +53,7 @@
 int nilfs_dat_move(struct inode *, __u64, sector_t);
 ssize_t nilfs_dat_get_vinfo(struct inode *, void *, unsigned, size_t);
 
+int nilfs_dat_read(struct inode *dat, struct nilfs_inode *raw_inode);
+struct inode *nilfs_dat_new(struct the_nilfs *nilfs, size_t entry_size);
+
 #endif	/* _NILFS_DAT_H */
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index e097099..76d803e 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -99,9 +99,9 @@
 				 NULL, nilfs_get_block);
 }
 
-static int nilfs_commit_chunk(struct page *page,
-			      struct address_space *mapping,
-			      unsigned from, unsigned to)
+static void nilfs_commit_chunk(struct page *page,
+			       struct address_space *mapping,
+			       unsigned from, unsigned to)
 {
 	struct inode *dir = mapping->host;
 	struct nilfs_sb_info *sbi = NILFS_SB(dir->i_sb);
@@ -112,15 +112,13 @@
 
 	nr_dirty = nilfs_page_count_clean_buffers(page, from, to);
 	copied = block_write_end(NULL, mapping, pos, len, len, page, NULL);
-	if (pos + copied > dir->i_size) {
+	if (pos + copied > dir->i_size)
 		i_size_write(dir, pos + copied);
-		mark_inode_dirty(dir);
-	}
 	if (IS_DIRSYNC(dir))
 		nilfs_set_transaction_flag(NILFS_TI_SYNC);
 	err = nilfs_set_file_dirty(sbi, dir, nr_dirty);
+	WARN_ON(err); /* cannot happen */
 	unlock_page(page);
-	return err;
 }
 
 static void nilfs_check_page(struct page *page)
@@ -455,11 +453,10 @@
 	BUG_ON(err);
 	de->inode = cpu_to_le64(inode->i_ino);
 	nilfs_set_de_type(de, inode);
-	err = nilfs_commit_chunk(page, mapping, from, to);
+	nilfs_commit_chunk(page, mapping, from, to);
 	nilfs_put_page(page);
 	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
 /*	NILFS_I(dir)->i_flags &= ~NILFS_BTREE_FL; */
-	mark_inode_dirty(dir);
 }
 
 /*
@@ -548,10 +545,10 @@
 	memcpy(de->name, name, namelen);
 	de->inode = cpu_to_le64(inode->i_ino);
 	nilfs_set_de_type(de, inode);
-	err = nilfs_commit_chunk(page, page->mapping, from, to);
+	nilfs_commit_chunk(page, page->mapping, from, to);
 	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
 /*	NILFS_I(dir)->i_flags &= ~NILFS_BTREE_FL; */
-	mark_inode_dirty(dir);
+	nilfs_mark_inode_dirty(dir);
 	/* OFFSET_CACHE */
 out_put:
 	nilfs_put_page(page);
@@ -595,10 +592,9 @@
 	if (pde)
 		pde->rec_len = cpu_to_le16(to - from);
 	dir->inode = 0;
-	err = nilfs_commit_chunk(page, mapping, from, to);
+	nilfs_commit_chunk(page, mapping, from, to);
 	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
 /*	NILFS_I(inode)->i_flags &= ~NILFS_BTREE_FL; */
-	mark_inode_dirty(inode);
 out:
 	nilfs_put_page(page);
 	return err;
@@ -640,7 +636,7 @@
 	memcpy(de->name, "..\0", 4);
 	nilfs_set_de_type(de, inode);
 	kunmap_atomic(kaddr, KM_USER0);
-	err = nilfs_commit_chunk(page, mapping, 0, chunk_size);
+	nilfs_commit_chunk(page, mapping, 0, chunk_size);
 fail:
 	page_cache_release(page);
 	return err;
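
nilfs_commit_chunk() can drop its return value because its only fallible
step, nilfs_set_file_dirty(), is asserted by the patch never to fail on a
chunk that was just written (hence the WARN_ON); the callers that used to
propagate the error now mark the directory inode dirty explicitly through
nilfs_mark_inode_dirty() where that is still needed.
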
diff --git a/fs/nilfs2/gcdat.c b/fs/nilfs2/gcdat.c
index 93383c5..dd5f7e0 100644
--- a/fs/nilfs2/gcdat.c
+++ b/fs/nilfs2/gcdat.c
@@ -61,6 +61,8 @@
 
 	nilfs_bmap_commit_gcdat(gii->i_bmap, dii->i_bmap);
 
+	nilfs_palloc_clear_cache(dat);
+	nilfs_palloc_clear_cache(gcdat);
 	nilfs_clear_dirty_pages(mapping);
 	nilfs_copy_back_pages(mapping, gmapping);
 	/* note: mdt dirty flags should be cleared by segctor. */
@@ -79,6 +81,7 @@
 	gcdat->i_state = I_CLEAR;
 	gii->i_flags = 0;
 
+	nilfs_palloc_clear_cache(gcdat);
 	truncate_inode_pages(gcdat->i_mapping, 0);
 	truncate_inode_pages(&gii->i_btnode_cache, 0);
 }
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index e6de0a2..e16a666 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -149,7 +149,7 @@
 				   __u64 vbn, struct buffer_head **out_bh)
 {
 	int ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache,
-					    vbn ? : pbn, pbn, out_bh, 0);
+					    vbn ? : pbn, pbn, out_bh);
 	if (ret == -EEXIST) /* internal code (cache hit) */
 		ret = 0;
 	return ret;
@@ -212,9 +212,10 @@
 static struct inode *alloc_gcinode(struct the_nilfs *nilfs, ino_t ino,
 				   __u64 cno)
 {
-	struct inode *inode = nilfs_mdt_new_common(nilfs, NULL, ino, GFP_NOFS);
+	struct inode *inode;
 	struct nilfs_inode_info *ii;
 
+	inode = nilfs_mdt_new_common(nilfs, NULL, ino, GFP_NOFS, 0);
 	if (!inode)
 		return NULL;
 
@@ -265,7 +266,6 @@
  */
 void nilfs_clear_gcinode(struct inode *inode)
 {
-	nilfs_mdt_clear(inode);
 	nilfs_mdt_destroy(inode);
 }
 
diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c
index de86401..922d9dd 100644
--- a/fs/nilfs2/ifile.c
+++ b/fs/nilfs2/ifile.c
@@ -29,6 +29,17 @@
 #include "alloc.h"
 #include "ifile.h"
 
+
+struct nilfs_ifile_info {
+	struct nilfs_mdt_info mi;
+	struct nilfs_palloc_cache palloc_cache;
+};
+
+static inline struct nilfs_ifile_info *NILFS_IFILE_I(struct inode *ifile)
+{
+	return (struct nilfs_ifile_info *)NILFS_MDT(ifile);
+}
+
 /**
  * nilfs_ifile_create_inode - create a new disk inode
  * @ifile: ifile inode
@@ -148,3 +159,27 @@
 	}
 	return err;
 }
+
+/**
+ * nilfs_ifile_new - create inode file
+ * @sbi: nilfs_sb_info struct
+ * @inode_size: size of an inode
+ */
+struct inode *nilfs_ifile_new(struct nilfs_sb_info *sbi, size_t inode_size)
+{
+	struct inode *ifile;
+	int err;
+
+	ifile = nilfs_mdt_new(sbi->s_nilfs, sbi->s_super, NILFS_IFILE_INO,
+			      sizeof(struct nilfs_ifile_info));
+	if (ifile) {
+		err = nilfs_palloc_init_blockgroup(ifile, inode_size);
+		if (unlikely(err)) {
+			nilfs_mdt_destroy(ifile);
+			return NULL;
+		}
+		nilfs_palloc_setup_cache(ifile,
+					 &NILFS_IFILE_I(ifile)->palloc_cache);
+	}
+	return ifile;
+}
diff --git a/fs/nilfs2/ifile.h b/fs/nilfs2/ifile.h
index ecc3ba76..cbca32e 100644
--- a/fs/nilfs2/ifile.h
+++ b/fs/nilfs2/ifile.h
@@ -49,4 +49,6 @@
 int nilfs_ifile_delete_inode(struct inode *, ino_t);
 int nilfs_ifile_get_inode_block(struct inode *, ino_t, struct buffer_head **);
 
+struct inode *nilfs_ifile_new(struct nilfs_sb_info *sbi, size_t inode_size);
+
 #endif	/* _NILFS_IFILE_H */
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 2a0a5a3..7868cc1 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -97,6 +97,7 @@
 			nilfs_transaction_abort(inode->i_sb);
 			goto out;
 		}
+		nilfs_mark_inode_dirty(inode);
 		nilfs_transaction_commit(inode->i_sb); /* never fails */
 		/* Error handling should be detailed */
 		set_buffer_new(bh_result);
@@ -322,7 +323,6 @@
 				    nilfs_init_acl(), proper cancellation of
 				    above jobs should be considered */
 
-	mark_inode_dirty(inode);
 	return inode;
 
  failed_acl:
@@ -525,7 +525,6 @@
 
 	raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, ibh);
 
-	/* The buffer is guarded with lock_buffer() by the caller */
 	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
 		memset(raw_inode, 0, NILFS_MDT(sbi->s_ifile)->mi_entry_size);
 	set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);
@@ -599,6 +598,7 @@
 	if (IS_SYNC(inode))
 		nilfs_set_transaction_flag(NILFS_TI_SYNC);
 
+	nilfs_mark_inode_dirty(inode);
 	nilfs_set_file_dirty(NILFS_SB(sb), inode, 0);
 	nilfs_transaction_commit(sb);
 	/* May construct a logical segment and may fail in sync mode.
@@ -623,6 +623,7 @@
 		truncate_inode_pages(&inode->i_data, 0);
 
 	nilfs_truncate_bmap(ii, 0);
+	nilfs_mark_inode_dirty(inode);
 	nilfs_free_inode(inode);
 	/* nilfs_free_inode() marks inode buffer dirty */
 	if (IS_SYNC(inode))
@@ -745,9 +746,7 @@
 			      "failed to reget inode block.\n");
 		return err;
 	}
-	lock_buffer(ibh);
 	nilfs_update_inode(inode, ibh);
-	unlock_buffer(ibh);
 	nilfs_mdt_mark_buffer_dirty(ibh);
 	nilfs_mdt_mark_dirty(sbi->s_ifile);
 	brelse(ibh);
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index f632611..06713ff 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -186,7 +186,7 @@
 }
 
 static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
-				struct buffer_head **out_bh)
+				int readahead, struct buffer_head **out_bh)
 {
 	struct buffer_head *first_bh, *bh;
 	unsigned long blkoff;
@@ -200,16 +200,18 @@
 	if (unlikely(err))
 		goto failed;
 
-	blkoff = block + 1;
-	for (i = 0; i < nr_ra_blocks; i++, blkoff++) {
-		err = nilfs_mdt_submit_block(inode, blkoff, READA, &bh);
-		if (likely(!err || err == -EEXIST))
-			brelse(bh);
-		else if (err != -EBUSY)
-			break; /* abort readahead if bmap lookup failed */
-
-		if (!buffer_locked(first_bh))
-			goto out_no_wait;
+	if (readahead) {
+		blkoff = block + 1;
+		for (i = 0; i < nr_ra_blocks; i++, blkoff++) {
+			err = nilfs_mdt_submit_block(inode, blkoff, READA, &bh);
+			if (likely(!err || err == -EEXIST))
+				brelse(bh);
+			else if (err != -EBUSY)
+				break;
+				/* abort readahead if bmap lookup failed */
+			if (!buffer_locked(first_bh))
+				goto out_no_wait;
+		}
 	}
 
 	wait_on_buffer(first_bh);
@@ -263,7 +265,7 @@
 
 	/* Should be rewritten with merging nilfs_mdt_read_block() */
  retry:
-	ret = nilfs_mdt_read_block(inode, blkoff, out_bh);
+	ret = nilfs_mdt_read_block(inode, blkoff, !create, out_bh);
 	if (!create || ret != -ENOENT)
 		return ret;
 
@@ -371,7 +373,7 @@
 	struct buffer_head *bh;
 	int err;
 
-	err = nilfs_mdt_read_block(inode, block, &bh);
+	err = nilfs_mdt_read_block(inode, block, 0, &bh);
 	if (unlikely(err))
 		return err;
 	nilfs_mark_buffer_dirty(bh);
@@ -445,9 +447,17 @@
  * longer than those of the super block structs; they may continue for
  * several consecutive mounts/umounts.  This would need discussions.
  */
+/**
+ * nilfs_mdt_new_common - allocate a pseudo inode for metadata file
+ * @nilfs: nilfs object
+ * @sb: super block instance the metadata file belongs to
+ * @ino: inode number
+ * @gfp_mask: gfp mask for data pages
+ * @objsz: size of the private object attached to inode->i_private
+ */
 struct inode *
 nilfs_mdt_new_common(struct the_nilfs *nilfs, struct super_block *sb,
-		     ino_t ino, gfp_t gfp_mask)
+		     ino_t ino, gfp_t gfp_mask, size_t objsz)
 {
 	struct inode *inode = nilfs_alloc_inode_common(nilfs);
 
@@ -455,8 +465,9 @@
 		return NULL;
 	else {
 		struct address_space * const mapping = &inode->i_data;
-		struct nilfs_mdt_info *mi = kzalloc(sizeof(*mi), GFP_NOFS);
+		struct nilfs_mdt_info *mi;
 
+		mi = kzalloc(max(sizeof(*mi), objsz), GFP_NOFS);
 		if (!mi) {
 			nilfs_destroy_inode(inode);
 			return NULL;
@@ -513,11 +524,11 @@
 }
 
 struct inode *nilfs_mdt_new(struct the_nilfs *nilfs, struct super_block *sb,
-			    ino_t ino)
+			    ino_t ino, size_t objsz)
 {
-	struct inode *inode = nilfs_mdt_new_common(nilfs, sb, ino,
-						   NILFS_MDT_GFP);
+	struct inode *inode;
 
+	inode = nilfs_mdt_new_common(nilfs, sb, ino, NILFS_MDT_GFP, objsz);
 	if (!inode)
 		return NULL;
 
@@ -544,14 +555,15 @@
 		&NILFS_I(orig)->i_btnode_cache;
 }
 
-void nilfs_mdt_clear(struct inode *inode)
+static void nilfs_mdt_clear(struct inode *inode)
 {
 	struct nilfs_inode_info *ii = NILFS_I(inode);
 
 	invalidate_mapping_pages(inode->i_mapping, 0, -1);
 	truncate_inode_pages(inode->i_mapping, 0);
 
-	nilfs_bmap_clear(ii->i_bmap);
+	if (test_bit(NILFS_I_BMAP, &ii->i_state))
+		nilfs_bmap_clear(ii->i_bmap);
 	nilfs_btnode_cache_clear(&ii->i_btnode_cache);
 }
 
@@ -559,6 +571,10 @@
 {
 	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
 
+	if (mdi->mi_palloc_cache)
+		nilfs_palloc_destroy_cache(inode);
+	nilfs_mdt_clear(inode);
+
 	kfree(mdi->mi_bgl); /* kfree(NULL) is safe */
 	kfree(mdi);
 	nilfs_destroy_inode(inode);
diff --git a/fs/nilfs2/mdt.h b/fs/nilfs2/mdt.h
index 4315997..6c4bbb0 100644
--- a/fs/nilfs2/mdt.h
+++ b/fs/nilfs2/mdt.h
@@ -36,6 +36,7 @@
  * @mi_entry_size: size of an entry
  * @mi_first_entry_offset: offset to the first entry
  * @mi_entries_per_block: number of entries in a block
+ * @mi_palloc_cache: persistent object allocator cache
  * @mi_blocks_per_group: number of blocks in a group
  * @mi_blocks_per_desc_block: number of blocks per descriptor block
  */
@@ -46,6 +47,7 @@
 	unsigned		mi_entry_size;
 	unsigned		mi_first_entry_offset;
 	unsigned long		mi_entries_per_block;
+	struct nilfs_palloc_cache *mi_palloc_cache;
 	unsigned long		mi_blocks_per_group;
 	unsigned long		mi_blocks_per_desc_block;
 };
@@ -74,11 +76,11 @@
 int nilfs_mdt_mark_block_dirty(struct inode *, unsigned long);
 int nilfs_mdt_fetch_dirty(struct inode *);
 
-struct inode *nilfs_mdt_new(struct the_nilfs *, struct super_block *, ino_t);
+struct inode *nilfs_mdt_new(struct the_nilfs *, struct super_block *, ino_t,
+			    size_t);
 struct inode *nilfs_mdt_new_common(struct the_nilfs *, struct super_block *,
-				   ino_t, gfp_t);
+				   ino_t, gfp_t, size_t);
 void nilfs_mdt_destroy(struct inode *);
-void nilfs_mdt_clear(struct inode *);
 void nilfs_mdt_set_entry_size(struct inode *, unsigned, unsigned);
 void nilfs_mdt_set_shadow(struct inode *, struct inode *);
 
@@ -104,21 +106,4 @@
 #define nilfs_mdt_bgl_lock(inode, bg) \
 	(&NILFS_MDT(inode)->mi_bgl->locks[(bg) & (NR_BG_LOCKS-1)].lock)
 
-
-static inline int
-nilfs_mdt_read_inode_direct(struct inode *inode, struct buffer_head *bh,
-			    unsigned n)
-{
-	return nilfs_read_inode_common(
-		inode, (struct nilfs_inode *)(bh->b_data + n));
-}
-
-static inline void
-nilfs_mdt_write_inode_direct(struct inode *inode, struct buffer_head *bh,
-			     unsigned n)
-{
-	nilfs_write_inode_common(
-		inode, (struct nilfs_inode *)(bh->b_data + n), 1);
-}
-
 #endif /* _NILFS_MDT_H */
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index ed02e88..07ba838 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -120,7 +120,7 @@
 		inode->i_op = &nilfs_file_inode_operations;
 		inode->i_fop = &nilfs_file_operations;
 		inode->i_mapping->a_ops = &nilfs_aops;
-		mark_inode_dirty(inode);
+		nilfs_mark_inode_dirty(inode);
 		err = nilfs_add_nondir(dentry, inode);
 	}
 	if (!err)
@@ -148,7 +148,7 @@
 	err = PTR_ERR(inode);
 	if (!IS_ERR(inode)) {
 		init_special_inode(inode, inode->i_mode, rdev);
-		mark_inode_dirty(inode);
+		nilfs_mark_inode_dirty(inode);
 		err = nilfs_add_nondir(dentry, inode);
 	}
 	if (!err)
@@ -188,7 +188,7 @@
 		goto out_fail;
 
 	/* mark_inode_dirty(inode); */
-	/* nilfs_new_inode() and page_symlink() do this */
+	/* page_symlink() does this */
 
 	err = nilfs_add_nondir(dentry, inode);
 out:
@@ -200,7 +200,8 @@
 	return err;
 
 out_fail:
-	inode_dec_link_count(inode);
+	drop_nlink(inode);
+	nilfs_mark_inode_dirty(inode);
 	iput(inode);
 	goto out;
 }
@@ -245,7 +246,7 @@
 	if (err)
 		return err;
 
-	inode_inc_link_count(dir);
+	inc_nlink(dir);
 
 	inode = nilfs_new_inode(dir, S_IFDIR | mode);
 	err = PTR_ERR(inode);
@@ -256,7 +257,7 @@
 	inode->i_fop = &nilfs_dir_operations;
 	inode->i_mapping->a_ops = &nilfs_aops;
 
-	inode_inc_link_count(inode);
+	inc_nlink(inode);
 
 	err = nilfs_make_empty(inode, dir);
 	if (err)
@@ -266,6 +267,7 @@
 	if (err)
 		goto out_fail;
 
+	nilfs_mark_inode_dirty(inode);
 	d_instantiate(dentry, inode);
 out:
 	if (!err)
@@ -276,26 +278,23 @@
 	return err;
 
 out_fail:
-	inode_dec_link_count(inode);
-	inode_dec_link_count(inode);
+	drop_nlink(inode);
+	drop_nlink(inode);
+	nilfs_mark_inode_dirty(inode);
 	iput(inode);
 out_dir:
-	inode_dec_link_count(dir);
+	drop_nlink(dir);
+	nilfs_mark_inode_dirty(dir);
 	goto out;
 }
 
-static int nilfs_unlink(struct inode *dir, struct dentry *dentry)
+static int nilfs_do_unlink(struct inode *dir, struct dentry *dentry)
 {
 	struct inode *inode;
 	struct nilfs_dir_entry *de;
 	struct page *page;
-	struct nilfs_transaction_info ti;
 	int err;
 
-	err = nilfs_transaction_begin(dir->i_sb, &ti, 0);
-	if (err)
-		return err;
-
 	err = -ENOENT;
 	de = nilfs_find_entry(dir, dentry, &page);
 	if (!de)
@@ -317,12 +316,28 @@
 		goto out;
 
 	inode->i_ctime = dir->i_ctime;
-	inode_dec_link_count(inode);
+	drop_nlink(inode);
 	err = 0;
 out:
-	if (!err)
+	return err;
+}
+
+static int nilfs_unlink(struct inode *dir, struct dentry *dentry)
+{
+	struct nilfs_transaction_info ti;
+	int err;
+
+	err = nilfs_transaction_begin(dir->i_sb, &ti, 0);
+	if (err)
+		return err;
+
+	err = nilfs_do_unlink(dir, dentry);
+
+	if (!err) {
+		nilfs_mark_inode_dirty(dir);
+		nilfs_mark_inode_dirty(dentry->d_inode);
 		err = nilfs_transaction_commit(dir->i_sb);
-	else
+	} else
 		nilfs_transaction_abort(dir->i_sb);
 
 	return err;
@@ -340,11 +355,13 @@
 
 	err = -ENOTEMPTY;
 	if (nilfs_empty_dir(inode)) {
-		err = nilfs_unlink(dir, dentry);
+		err = nilfs_do_unlink(dir, dentry);
 		if (!err) {
 			inode->i_size = 0;
-			inode_dec_link_count(inode);
-			inode_dec_link_count(dir);
+			drop_nlink(inode);
+			nilfs_mark_inode_dirty(inode);
+			drop_nlink(dir);
+			nilfs_mark_inode_dirty(dir);
 		}
 	}
 	if (!err)
@@ -395,42 +412,48 @@
 		new_de = nilfs_find_entry(new_dir, new_dentry, &new_page);
 		if (!new_de)
 			goto out_dir;
-		inode_inc_link_count(old_inode);
+		inc_nlink(old_inode);
 		nilfs_set_link(new_dir, new_de, new_page, old_inode);
+		nilfs_mark_inode_dirty(new_dir);
 		new_inode->i_ctime = CURRENT_TIME;
 		if (dir_de)
 			drop_nlink(new_inode);
-		inode_dec_link_count(new_inode);
+		drop_nlink(new_inode);
+		nilfs_mark_inode_dirty(new_inode);
 	} else {
 		if (dir_de) {
 			err = -EMLINK;
 			if (new_dir->i_nlink >= NILFS_LINK_MAX)
 				goto out_dir;
 		}
-		inode_inc_link_count(old_inode);
+		inc_nlink(old_inode);
 		err = nilfs_add_link(new_dentry, old_inode);
 		if (err) {
-			inode_dec_link_count(old_inode);
+			drop_nlink(old_inode);
+			nilfs_mark_inode_dirty(old_inode);
 			goto out_dir;
 		}
-		if (dir_de)
-			inode_inc_link_count(new_dir);
+		if (dir_de) {
+			inc_nlink(new_dir);
+			nilfs_mark_inode_dirty(new_dir);
+		}
 	}
 
 	/*
 	 * Like most other Unix systems, set the ctime for inodes on a
 	 * rename.
-	 * inode_dec_link_count() will mark the inode dirty.
 	 */
 	old_inode->i_ctime = CURRENT_TIME;
 
 	nilfs_delete_entry(old_de, old_page);
-	inode_dec_link_count(old_inode);
+	drop_nlink(old_inode);
 
 	if (dir_de) {
 		nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
-		inode_dec_link_count(old_dir);
+		drop_nlink(old_dir);
 	}
+	nilfs_mark_inode_dirty(old_dir);
+	nilfs_mark_inode_dirty(old_inode);
 
 	err = nilfs_transaction_commit(old_dir->i_sb);
 	return err;
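
The conversions from inode_inc_link_count()/inode_dec_link_count() to bare
inc_nlink()/drop_nlink() throughout namei.c are not behavior-neutral on their
own: the inode_*_link_count() helpers also call the generic
mark_inode_dirty().  The patch compensates with explicit
nilfs_mark_inode_dirty() calls, routing the dirtying through nilfs' own
inode-buffer path instead of the VFS default, matching the inode.c changes
above.
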
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index 6dc8359..c9c96c7 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -770,14 +770,8 @@
 		nilfs_finish_roll_forward(nilfs, sbi, ri);
 	}
 
-	nilfs_detach_checkpoint(sbi);
-	return 0;
-
  failed:
 	nilfs_detach_checkpoint(sbi);
-	nilfs_mdt_clear(nilfs->ns_cpfile);
-	nilfs_mdt_clear(nilfs->ns_sufile);
-	nilfs_mdt_clear(nilfs->ns_dat);
 	return err;
 }
 
@@ -804,6 +798,7 @@
 	struct nilfs_segsum_info ssi;
 	sector_t pseg_start, pseg_end, sr_pseg_start = 0;
 	sector_t seg_start, seg_end; /* range of full segment (block number) */
+	sector_t b, end;
 	u64 seg_seq;
 	__u64 segnum, nextnum = 0;
 	__u64 cno;
@@ -819,6 +814,11 @@
 	/* Calculate range of segment */
 	nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
 
+	/* Read ahead segment */
+	b = seg_start;
+	while (b <= seg_end)
+		sb_breadahead(sbi->s_super, b++);
+
 	for (;;) {
 		/* Load segment summary */
 		ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi, 1);
@@ -841,14 +841,20 @@
 		ri->ri_nextnum = nextnum;
 		empty_seg = 0;
 
+		if (!NILFS_SEG_HAS_SR(&ssi) && !scan_newer) {
+			/* This will never happen because a superblock
+			   (last_segment) always points to a pseg
+			   having a super root. */
+			ret = NILFS_SEG_FAIL_CONSISTENCY;
+			goto failed;
+		}
+
+		if (pseg_start == seg_start) {
+			nilfs_get_segment_range(nilfs, nextnum, &b, &end);
+			while (b <= end)
+				sb_breadahead(sbi->s_super, b++);
+		}
 		if (!NILFS_SEG_HAS_SR(&ssi)) {
-			if (!scan_newer) {
-				/* This will never happen because a superblock
-				   (last_segment) always points to a pseg
-				   having a super root. */
-				ret = NILFS_SEG_FAIL_CONSISTENCY;
-				goto failed;
-			}
 			if (!ri->ri_lsegs_start && NILFS_SEG_LOGBGN(&ssi)) {
 				ri->ri_lsegs_start = pseg_start;
 				ri->ri_lsegs_start_seq = seg_seq;
@@ -919,7 +925,7 @@
 
  super_root_found:
 	/* Updating pointers relating to the latest checkpoint */
-	list_splice(&segments, ri->ri_used_segments.prev);
+	list_splice_tail(&segments, &ri->ri_used_segments);
 	nilfs->ns_last_pseg = sr_pseg_start;
 	nilfs->ns_last_seq = nilfs->ns_seg_seq;
 	nilfs->ns_last_cno = ri->ri_cno;
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index e6d9e37..645c786 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -24,10 +24,22 @@
 #include <linux/buffer_head.h>
 #include <linux/writeback.h>
 #include <linux/crc32.h>
+#include <linux/backing-dev.h>
 #include "page.h"
 #include "segbuf.h"
 
 
+struct nilfs_write_info {
+	struct the_nilfs       *nilfs;
+	struct bio	       *bio;
+	int 			start, end; /* The region to be submitted */
+	int			rest_blocks;
+	int			max_pages;
+	int			nr_vecs;
+	sector_t		blocknr;
+};
+
+
 static struct kmem_cache *nilfs_segbuf_cachep;
 
 static void nilfs_segbuf_init_once(void *obj)
@@ -63,6 +75,11 @@
 	INIT_LIST_HEAD(&segbuf->sb_list);
 	INIT_LIST_HEAD(&segbuf->sb_segsum_buffers);
 	INIT_LIST_HEAD(&segbuf->sb_payload_buffers);
+
+	init_completion(&segbuf->sb_bio_event);
+	atomic_set(&segbuf->sb_err, 0);
+	segbuf->sb_nbio = 0;
+
 	return segbuf;
 }
 
@@ -83,6 +100,22 @@
 		segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
 }
 
+/**
+ * nilfs_segbuf_map_cont - map a new log behind a given log
+ * @segbuf: new segment buffer
+ * @prev: segment buffer containing a log to be continued
+ */
+void nilfs_segbuf_map_cont(struct nilfs_segment_buffer *segbuf,
+			   struct nilfs_segment_buffer *prev)
+{
+	segbuf->sb_segnum = prev->sb_segnum;
+	segbuf->sb_fseg_start = prev->sb_fseg_start;
+	segbuf->sb_fseg_end = prev->sb_fseg_end;
+	segbuf->sb_pseg_start = prev->sb_pseg_start + prev->sb_sum.nblocks;
+	segbuf->sb_rest_blocks =
+		segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
+}
+
 void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *segbuf,
 				  __u64 nextnum, struct the_nilfs *nilfs)
 {
@@ -132,8 +165,6 @@
 	segbuf->sb_sum.sumbytes = sizeof(struct nilfs_segment_summary);
 	segbuf->sb_sum.nfinfo = segbuf->sb_sum.nfileblk = 0;
 	segbuf->sb_sum.ctime = ctime;
-
-	segbuf->sb_io_error = 0;
 	return 0;
 }
 
@@ -219,7 +250,7 @@
 	raw_sum->ss_datasum = cpu_to_le32(crc);
 }
 
-void nilfs_release_buffers(struct list_head *list)
+static void nilfs_release_buffers(struct list_head *list)
 {
 	struct buffer_head *bh, *n;
 
@@ -241,13 +272,56 @@
 	}
 }
 
+static void nilfs_segbuf_clear(struct nilfs_segment_buffer *segbuf)
+{
+	nilfs_release_buffers(&segbuf->sb_segsum_buffers);
+	nilfs_release_buffers(&segbuf->sb_payload_buffers);
+}
+
+/*
+ * Iterators for segment buffers
+ */
+void nilfs_clear_logs(struct list_head *logs)
+{
+	struct nilfs_segment_buffer *segbuf;
+
+	list_for_each_entry(segbuf, logs, sb_list)
+		nilfs_segbuf_clear(segbuf);
+}
+
+void nilfs_truncate_logs(struct list_head *logs,
+			 struct nilfs_segment_buffer *last)
+{
+	struct nilfs_segment_buffer *n, *segbuf;
+
+	segbuf = list_prepare_entry(last, logs, sb_list);
+	list_for_each_entry_safe_continue(segbuf, n, logs, sb_list) {
+		list_del_init(&segbuf->sb_list);
+		nilfs_segbuf_clear(segbuf);
+		nilfs_segbuf_free(segbuf);
+	}
+}
+
+int nilfs_wait_on_logs(struct list_head *logs)
+{
+	struct nilfs_segment_buffer *segbuf;
+	int err;
+
+	list_for_each_entry(segbuf, logs, sb_list) {
+		err = nilfs_segbuf_wait(segbuf);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
 /*
  * BIO operations
  */
 static void nilfs_end_bio_write(struct bio *bio, int err)
 {
 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct nilfs_write_info *wi = bio->bi_private;
+	struct nilfs_segment_buffer *segbuf = bio->bi_private;
 
 	if (err == -EOPNOTSUPP) {
 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
@@ -256,21 +330,22 @@
 	}
 
 	if (!uptodate)
-		atomic_inc(&wi->err);
+		atomic_inc(&segbuf->sb_err);
 
 	bio_put(bio);
-	complete(&wi->bio_event);
+	complete(&segbuf->sb_bio_event);
 }
 
-static int nilfs_submit_seg_bio(struct nilfs_write_info *wi, int mode)
+static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
+				   struct nilfs_write_info *wi, int mode)
 {
 	struct bio *bio = wi->bio;
 	int err;
 
-	if (wi->nbio > 0 && bdi_write_congested(wi->bdi)) {
-		wait_for_completion(&wi->bio_event);
-		wi->nbio--;
-		if (unlikely(atomic_read(&wi->err))) {
+	if (segbuf->sb_nbio > 0 && bdi_write_congested(wi->nilfs->ns_bdi)) {
+		wait_for_completion(&segbuf->sb_bio_event);
+		segbuf->sb_nbio--;
+		if (unlikely(atomic_read(&segbuf->sb_err))) {
 			bio_put(bio);
 			err = -EIO;
 			goto failed;
@@ -278,7 +353,7 @@
 	}
 
 	bio->bi_end_io = nilfs_end_bio_write;
-	bio->bi_private = wi;
+	bio->bi_private = segbuf;
 	bio_get(bio);
 	submit_bio(mode, bio);
 	if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
@@ -286,7 +361,7 @@
 		err = -EOPNOTSUPP;
 		goto failed;
 	}
-	wi->nbio++;
+	segbuf->sb_nbio++;
 	bio_put(bio);
 
 	wi->bio = NULL;
@@ -301,17 +376,15 @@
 }
 
 /**
- * nilfs_alloc_seg_bio - allocate a bio for writing segment.
- * @sb: super block
- * @start: beginning disk block number of this BIO.
+ * nilfs_alloc_seg_bio - allocate a new bio for writing log
+ * @nilfs: nilfs object
+ * @start: start block number of the bio
  * @nr_vecs: request size of page vector.
  *
- * alloc_seg_bio() allocates a new BIO structure and initialize it.
- *
  * Return Value: On success, pointer to the struct bio is returned.
  * On error, NULL is returned.
  */
-static struct bio *nilfs_alloc_seg_bio(struct super_block *sb, sector_t start,
+static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start,
 				       int nr_vecs)
 {
 	struct bio *bio;
@@ -322,36 +395,33 @@
 			bio = bio_alloc(GFP_NOIO, nr_vecs);
 	}
 	if (likely(bio)) {
-		bio->bi_bdev = sb->s_bdev;
-		bio->bi_sector = (sector_t)start << (sb->s_blocksize_bits - 9);
+		bio->bi_bdev = nilfs->ns_bdev;
+		bio->bi_sector = start << (nilfs->ns_blocksize_bits - 9);
 	}
 	return bio;
 }
 
-void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf,
-				struct nilfs_write_info *wi)
+static void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf,
+				       struct nilfs_write_info *wi)
 {
 	wi->bio = NULL;
 	wi->rest_blocks = segbuf->sb_sum.nblocks;
-	wi->max_pages = bio_get_nr_vecs(wi->sb->s_bdev);
+	wi->max_pages = bio_get_nr_vecs(wi->nilfs->ns_bdev);
 	wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
 	wi->start = wi->end = 0;
-	wi->nbio = 0;
 	wi->blocknr = segbuf->sb_pseg_start;
-
-	atomic_set(&wi->err, 0);
-	init_completion(&wi->bio_event);
 }
 
-static int nilfs_submit_bh(struct nilfs_write_info *wi, struct buffer_head *bh,
-			   int mode)
+static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf,
+				  struct nilfs_write_info *wi,
+				  struct buffer_head *bh, int mode)
 {
 	int len, err;
 
 	BUG_ON(wi->nr_vecs <= 0);
  repeat:
 	if (!wi->bio) {
-		wi->bio = nilfs_alloc_seg_bio(wi->sb, wi->blocknr + wi->end,
+		wi->bio = nilfs_alloc_seg_bio(wi->nilfs, wi->blocknr + wi->end,
 					      wi->nr_vecs);
 		if (unlikely(!wi->bio))
 			return -ENOMEM;
@@ -363,76 +433,83 @@
 		return 0;
 	}
 	/* bio is FULL */
-	err = nilfs_submit_seg_bio(wi, mode);
+	err = nilfs_segbuf_submit_bio(segbuf, wi, mode);
 	/* never submit current bh */
 	if (likely(!err))
 		goto repeat;
 	return err;
 }
 
+/**
+ * nilfs_segbuf_write - submit write requests of a log
+ * @segbuf: buffer storing a log to be written
+ * @nilfs: nilfs object
+ *
+ * Return Value: On Success, 0 is returned. On Error, one of the following
+ * negative error code is returned.
+ *
+ * %-EIO - I/O error
+ *
+ * %-ENOMEM - Insufficient memory available.
+ */
 int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
-		       struct nilfs_write_info *wi)
+		       struct the_nilfs *nilfs)
 {
+	struct nilfs_write_info wi;
 	struct buffer_head *bh;
-	int res, rw = WRITE;
+	int res = 0, rw = WRITE;
+
+	wi.nilfs = nilfs;
+	nilfs_segbuf_prepare_write(segbuf, &wi);
 
 	list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) {
-		res = nilfs_submit_bh(wi, bh, rw);
+		res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw);
 		if (unlikely(res))
 			goto failed_bio;
 	}
 
 	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
-		res = nilfs_submit_bh(wi, bh, rw);
+		res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw);
 		if (unlikely(res))
 			goto failed_bio;
 	}
 
-	if (wi->bio) {
+	if (wi.bio) {
 		/*
 		 * Last BIO is always sent through the following
 		 * submission.
 		 */
 		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
-		res = nilfs_submit_seg_bio(wi, rw);
-		if (unlikely(res))
-			goto failed_bio;
+		res = nilfs_segbuf_submit_bio(segbuf, &wi, rw);
 	}
 
-	res = 0;
- out:
-	return res;
-
  failed_bio:
-	atomic_inc(&wi->err);
-	goto out;
+	return res;
 }
 
 /**
  * nilfs_segbuf_wait - wait for completion of requested BIOs
- * @wi: nilfs_write_info
+ * @segbuf: segment buffer
  *
  * Return Value: On Success, 0 is returned. On Error, one of the following
  * negative error code is returned.
  *
  * %-EIO - I/O error
  */
-int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf,
-		      struct nilfs_write_info *wi)
+int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf)
 {
 	int err = 0;
 
-	if (!wi->nbio)
+	if (!segbuf->sb_nbio)
 		return 0;
 
 	do {
-		wait_for_completion(&wi->bio_event);
-	} while (--wi->nbio > 0);
+		wait_for_completion(&segbuf->sb_bio_event);
+	} while (--segbuf->sb_nbio > 0);
 
-	if (unlikely(atomic_read(&wi->err) > 0)) {
+	if (unlikely(atomic_read(&segbuf->sb_err) > 0)) {
 		printk(KERN_ERR "NILFS: IO error writing segment\n");
 		err = -EIO;
-		segbuf->sb_io_error = 1;
 	}
 	return err;
 }
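
The write-status fields (sb_nbio, sb_err, sb_bio_event) move off the
transient nilfs_write_info and onto the segment buffer itself, so completion
state can outlive a single nilfs_segbuf_write() call and nilfs_wait_on_logs()
can reap several logs later.  A userspace sketch of the same in-flight
accounting, using a POSIX semaphore for the completion and a C11 atomic for
the sticky error count; all names are illustrative:

    #include <errno.h>
    #include <semaphore.h>
    #include <stdatomic.h>

    struct segbuf {
            int nbio;                /* submitted, not yet reaped */
            atomic_int err;          /* sticky error count */
            sem_t done;              /* posted once per completed request */
    };

    static void segbuf_init(struct segbuf *sb)
    {
            sb->nbio = 0;
            atomic_init(&sb->err, 0);
            sem_init(&sb->done, 0, 0);
    }

    static void segbuf_submit_one(struct segbuf *sb)
    {
            /* ...queue the asynchronous request here... */
            sb->nbio++;
    }

    /* completion handler: runs once per finished request, possibly from
     * another thread -- plays nilfs_end_bio_write() */
    static void end_io(struct segbuf *sb, int ok)
    {
            if (!ok)
                    atomic_fetch_add(&sb->err, 1);
            sem_post(&sb->done);     /* complete(&sb->sb_bio_event) */
    }

    /* nilfs_segbuf_wait() analogue: reap every outstanding request */
    static int segbuf_wait(struct segbuf *sb)
    {
            while (sb->nbio > 0) {
                    sem_wait(&sb->done);
                    sb->nbio--;
            }
            return atomic_load(&sb->err) ? -EIO : 0;
    }
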
diff --git a/fs/nilfs2/segbuf.h b/fs/nilfs2/segbuf.h
index 0c3076f..6af1630 100644
--- a/fs/nilfs2/segbuf.h
+++ b/fs/nilfs2/segbuf.h
@@ -27,7 +27,6 @@
 #include <linux/buffer_head.h>
 #include <linux/bio.h>
 #include <linux/completion.h>
-#include <linux/backing-dev.h>
 
 /**
  * struct nilfs_segsum_info - On-memory segment summary
@@ -77,7 +76,9 @@
  * @sb_rest_blocks: Number of residual blocks in the current segment
  * @sb_segsum_buffers: List of buffers for segment summaries
  * @sb_payload_buffers: List of buffers for segment payload
- * @sb_io_error: I/O error status
+ * @sb_nbio: Number of flying bio requests
+ * @sb_err: I/O error status
+ * @sb_bio_event: Completion event of log writing
  */
 struct nilfs_segment_buffer {
 	struct super_block     *sb_super;
@@ -96,7 +97,9 @@
 	struct list_head	sb_payload_buffers; /* including super root */
 
 	/* io status */
-	int			sb_io_error;
+	int			sb_nbio;
+	atomic_t		sb_err;
+	struct completion	sb_bio_event;
 };
 
 #define NILFS_LIST_SEGBUF(head)  \
@@ -125,6 +128,8 @@
 void nilfs_segbuf_free(struct nilfs_segment_buffer *);
 void nilfs_segbuf_map(struct nilfs_segment_buffer *, __u64, unsigned long,
 		      struct the_nilfs *);
+void nilfs_segbuf_map_cont(struct nilfs_segment_buffer *segbuf,
+			   struct nilfs_segment_buffer *prev);
 void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *, __u64,
 				  struct the_nilfs *);
 int nilfs_segbuf_reset(struct nilfs_segment_buffer *, unsigned, time_t);
@@ -161,41 +166,18 @@
 	segbuf->sb_sum.nfileblk++;
 }
 
-void nilfs_release_buffers(struct list_head *);
+int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
+		       struct the_nilfs *nilfs);
+int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf);
 
-static inline void nilfs_segbuf_clear(struct nilfs_segment_buffer *segbuf)
+void nilfs_clear_logs(struct list_head *logs);
+void nilfs_truncate_logs(struct list_head *logs,
+			 struct nilfs_segment_buffer *last);
+int nilfs_wait_on_logs(struct list_head *logs);
+
+static inline void nilfs_destroy_logs(struct list_head *logs)
 {
-	nilfs_release_buffers(&segbuf->sb_segsum_buffers);
-	nilfs_release_buffers(&segbuf->sb_payload_buffers);
+	nilfs_truncate_logs(logs, NULL);
 }
 
-struct nilfs_write_info {
-	struct bio	       *bio;
-	int 			start, end; /* The region to be submitted */
-	int			rest_blocks;
-	int			max_pages;
-	int			nr_vecs;
-	sector_t		blocknr;
-
-	int			nbio;
-	atomic_t		err;
-	struct completion	bio_event;
-				/* completion event of segment write */
-
-	/*
-	 * The following fields must be set explicitly
-	 */
-	struct super_block     *sb;
-	struct backing_dev_info *bdi; /* backing dev info */
-	struct buffer_head     *bh_sr;
-};
-
-
-void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *,
-				struct nilfs_write_info *);
-int nilfs_segbuf_write(struct nilfs_segment_buffer *,
-		       struct nilfs_write_info *);
-int nilfs_segbuf_wait(struct nilfs_segment_buffer *,
-		      struct nilfs_write_info *);
-
 #endif /* _NILFS_SEGBUF_H */
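
Note that nilfs_destroy_logs() is simply nilfs_truncate_logs(logs, NULL): in
nilfs_truncate_logs(), list_prepare_entry(NULL, logs, sb_list) degenerates to
the list head, so the safe-continue iteration starts at the first segment
buffer and the entire list is cleared and freed.
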
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 6eff66a..17584c5 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -974,12 +974,12 @@
 			      nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
 	raw_sr->sr_flags = 0;
 
-	nilfs_mdt_write_inode_direct(
-		nilfs_dat_inode(nilfs), bh_sr, NILFS_SR_DAT_OFFSET(isz));
-	nilfs_mdt_write_inode_direct(
-		nilfs->ns_cpfile, bh_sr, NILFS_SR_CPFILE_OFFSET(isz));
-	nilfs_mdt_write_inode_direct(
-		nilfs->ns_sufile, bh_sr, NILFS_SR_SUFILE_OFFSET(isz));
+	nilfs_write_inode_common(nilfs_dat_inode(nilfs), (void *)raw_sr +
+				 NILFS_SR_DAT_OFFSET(isz), 1);
+	nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
+				 NILFS_SR_CPFILE_OFFSET(isz), 1);
+	nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
+				 NILFS_SR_SUFILE_OFFSET(isz), 1);
 }
 
 static void nilfs_redirty_inodes(struct list_head *head)
@@ -1273,73 +1273,75 @@
 	return err;
 }
 
-static int nilfs_touch_segusage(struct inode *sufile, __u64 segnum)
-{
-	struct buffer_head *bh_su;
-	struct nilfs_segment_usage *raw_su;
-	int err;
-
-	err = nilfs_sufile_get_segment_usage(sufile, segnum, &raw_su, &bh_su);
-	if (unlikely(err))
-		return err;
-	nilfs_mdt_mark_buffer_dirty(bh_su);
-	nilfs_mdt_mark_dirty(sufile);
-	nilfs_sufile_put_segment_usage(sufile, segnum, bh_su);
-	return 0;
-}
-
+/**
+ * nilfs_segctor_begin_construction - setup segment buffer to make a new log
+ * @sci: nilfs_sc_info
+ * @nilfs: nilfs object
+ */
 static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
 					    struct the_nilfs *nilfs)
 {
-	struct nilfs_segment_buffer *segbuf, *n;
+	struct nilfs_segment_buffer *segbuf, *prev;
 	__u64 nextnum;
-	int err;
+	int err, alloc = 0;
 
-	if (list_empty(&sci->sc_segbufs)) {
-		segbuf = nilfs_segbuf_new(sci->sc_super);
-		if (unlikely(!segbuf))
-			return -ENOMEM;
-		list_add(&segbuf->sb_list, &sci->sc_segbufs);
-	} else
-		segbuf = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
+	segbuf = nilfs_segbuf_new(sci->sc_super);
+	if (unlikely(!segbuf))
+		return -ENOMEM;
 
-	nilfs_segbuf_map(segbuf, nilfs->ns_segnum, nilfs->ns_pseg_offset,
-			 nilfs);
+	if (list_empty(&sci->sc_write_logs)) {
+		nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
+				 nilfs->ns_pseg_offset, nilfs);
+		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
+			nilfs_shift_to_next_segment(nilfs);
+			nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
+		}
 
-	if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
-		nilfs_shift_to_next_segment(nilfs);
-		nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
-	}
-	sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
-
-	err = nilfs_touch_segusage(nilfs->ns_sufile, segbuf->sb_segnum);
-	if (unlikely(err))
-		return err;
-
-	if (nilfs->ns_segnum == nilfs->ns_nextnum) {
-		/* Start from the head of a new full segment */
-		err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
-		if (unlikely(err))
-			return err;
-	} else
+		segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
 		nextnum = nilfs->ns_nextnum;
 
-	segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
+		if (nilfs->ns_segnum == nilfs->ns_nextnum)
+			/* Start from the head of a new full segment */
+			alloc++;
+	} else {
+		/* Continue logs */
+		prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
+		nilfs_segbuf_map_cont(segbuf, prev);
+		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
+		nextnum = prev->sb_nextnum;
+
+		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
+			nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
+			segbuf->sb_sum.seg_seq++;
+			alloc++;
+		}
+	}
+
+	err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
+	if (err)
+		goto failed;
+
+	if (alloc) {
+		err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
+		if (err)
+			goto failed;
+	}
 	nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);
 
-	/* truncating segment buffers */
-	list_for_each_entry_safe_continue(segbuf, n, &sci->sc_segbufs,
-					  sb_list) {
-		list_del_init(&segbuf->sb_list);
-		nilfs_segbuf_free(segbuf);
-	}
+	BUG_ON(!list_empty(&sci->sc_segbufs));
+	list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
+	sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
 	return 0;
+
+ failed:
+	nilfs_segbuf_free(segbuf);
+	return err;
 }
 
 static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
 					 struct the_nilfs *nilfs, int nadd)
 {
-	struct nilfs_segment_buffer *segbuf, *prev, *n;
+	struct nilfs_segment_buffer *segbuf, *prev;
 	struct inode *sufile = nilfs->ns_sufile;
 	__u64 nextnextnum;
 	LIST_HEAD(list);
@@ -1352,7 +1354,7 @@
 	 * not be dirty.  The following call ensures that the buffer is dirty
 	 * and will pin the buffer on memory until the sufile is written.
 	 */
-	err = nilfs_touch_segusage(sufile, prev->sb_nextnum);
+	err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
 	if (unlikely(err))
 		return err;
 
@@ -1378,33 +1380,33 @@
 		list_add_tail(&segbuf->sb_list, &list);
 		prev = segbuf;
 	}
-	list_splice(&list, sci->sc_segbufs.prev);
+	list_splice_tail(&list, &sci->sc_segbufs);
 	return 0;
 
  failed_segbuf:
 	nilfs_segbuf_free(segbuf);
  failed:
-	list_for_each_entry_safe(segbuf, n, &list, sb_list) {
+	list_for_each_entry(segbuf, &list, sb_list) {
 		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
 		WARN_ON(ret); /* never fails */
-		list_del_init(&segbuf->sb_list);
-		nilfs_segbuf_free(segbuf);
 	}
+	nilfs_destroy_logs(&list);
 	return err;
 }
 
-static void nilfs_segctor_free_incomplete_segments(struct nilfs_sc_info *sci,
-						   struct the_nilfs *nilfs)
+static void nilfs_free_incomplete_logs(struct list_head *logs,
+				       struct the_nilfs *nilfs)
 {
-	struct nilfs_segment_buffer *segbuf;
-	int ret, done = 0;
+	struct nilfs_segment_buffer *segbuf, *prev;
+	struct inode *sufile = nilfs->ns_sufile;
+	int ret;
 
-	segbuf = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
+	segbuf = NILFS_FIRST_SEGBUF(logs);
 	if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
-		ret = nilfs_sufile_free(nilfs->ns_sufile, segbuf->sb_nextnum);
+		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
 		WARN_ON(ret); /* never fails */
 	}
-	if (segbuf->sb_io_error) {
+	if (atomic_read(&segbuf->sb_err)) {
 		/* Case 1: The first segment failed */
 		if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
 			/* Case 1a:  Partial segment appended into an existing
@@ -1413,106 +1415,54 @@
 						segbuf->sb_fseg_end);
 		else /* Case 1b:  New full segment */
 			set_nilfs_discontinued(nilfs);
-		done++;
 	}
 
-	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
-		ret = nilfs_sufile_free(nilfs->ns_sufile, segbuf->sb_nextnum);
-		WARN_ON(ret); /* never fails */
-		if (!done && segbuf->sb_io_error) {
-			if (segbuf->sb_segnum != nilfs->ns_nextnum)
-				/* Case 2: extended segment (!= next) failed */
-				nilfs_sufile_set_error(nilfs->ns_sufile,
-						       segbuf->sb_segnum);
-			done++;
+	prev = segbuf;
+	list_for_each_entry_continue(segbuf, logs, sb_list) {
+		if (prev->sb_nextnum != segbuf->sb_nextnum) {
+			ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
+			WARN_ON(ret); /* never fails */
 		}
+		if (atomic_read(&segbuf->sb_err) &&
+		    segbuf->sb_segnum != nilfs->ns_nextnum)
+			/* Case 2: extended segment (!= next) failed */
+			nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
+		prev = segbuf;
 	}
 }
 
-static void nilfs_segctor_clear_segment_buffers(struct nilfs_sc_info *sci)
-{
-	struct nilfs_segment_buffer *segbuf;
-
-	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list)
-		nilfs_segbuf_clear(segbuf);
-	sci->sc_super_root = NULL;
-}
-
-static void nilfs_segctor_destroy_segment_buffers(struct nilfs_sc_info *sci)
-{
-	struct nilfs_segment_buffer *segbuf;
-
-	while (!list_empty(&sci->sc_segbufs)) {
-		segbuf = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
-		list_del_init(&segbuf->sb_list);
-		nilfs_segbuf_free(segbuf);
-	}
-	/* sci->sc_curseg = NULL; */
-}
-
-static void nilfs_segctor_end_construction(struct nilfs_sc_info *sci,
-					   struct the_nilfs *nilfs, int err)
-{
-	if (unlikely(err)) {
-		nilfs_segctor_free_incomplete_segments(sci, nilfs);
-		if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
-			int ret;
-
-			ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
-							sci->sc_freesegs,
-							sci->sc_nfreesegs,
-							NULL);
-			WARN_ON(ret); /* do not happen */
-		}
-	}
-	nilfs_segctor_clear_segment_buffers(sci);
-}
-
 static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
 					  struct inode *sufile)
 {
 	struct nilfs_segment_buffer *segbuf;
-	struct buffer_head *bh_su;
-	struct nilfs_segment_usage *raw_su;
 	unsigned long live_blocks;
 	int ret;
 
 	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
-		ret = nilfs_sufile_get_segment_usage(sufile, segbuf->sb_segnum,
-						     &raw_su, &bh_su);
-		WARN_ON(ret); /* always succeed because bh_su is dirty */
 		live_blocks = segbuf->sb_sum.nblocks +
 			(segbuf->sb_pseg_start - segbuf->sb_fseg_start);
-		raw_su->su_lastmod = cpu_to_le64(sci->sc_seg_ctime);
-		raw_su->su_nblocks = cpu_to_le32(live_blocks);
-		nilfs_sufile_put_segment_usage(sufile, segbuf->sb_segnum,
-					       bh_su);
+		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
+						     live_blocks,
+						     sci->sc_seg_ctime);
+		WARN_ON(ret); /* always succeeds because the segusage is dirty */
 	}
 }
 
-static void nilfs_segctor_cancel_segusage(struct nilfs_sc_info *sci,
-					  struct inode *sufile)
+static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
 {
 	struct nilfs_segment_buffer *segbuf;
-	struct buffer_head *bh_su;
-	struct nilfs_segment_usage *raw_su;
 	int ret;
 
-	segbuf = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
-	ret = nilfs_sufile_get_segment_usage(sufile, segbuf->sb_segnum,
-					     &raw_su, &bh_su);
-	WARN_ON(ret); /* always succeed because bh_su is dirty */
-	raw_su->su_nblocks = cpu_to_le32(segbuf->sb_pseg_start -
-					 segbuf->sb_fseg_start);
-	nilfs_sufile_put_segment_usage(sufile, segbuf->sb_segnum, bh_su);
+	segbuf = NILFS_FIRST_SEGBUF(logs);
+	ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
+					     segbuf->sb_pseg_start -
+					     segbuf->sb_fseg_start, 0);
+	WARN_ON(ret); /* always succeed because the segusage is dirty */
 
-	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
-		ret = nilfs_sufile_get_segment_usage(sufile, segbuf->sb_segnum,
-						     &raw_su, &bh_su);
+	list_for_each_entry_continue(segbuf, logs, sb_list) {
+		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
+						     0, 0);
 		WARN_ON(ret); /* always succeed */
-		raw_su->su_nblocks = 0;
-		nilfs_sufile_put_segment_usage(sufile, segbuf->sb_segnum,
-					       bh_su);
 	}
 }
 
@@ -1520,17 +1470,15 @@
 					    struct nilfs_segment_buffer *last,
 					    struct inode *sufile)
 {
-	struct nilfs_segment_buffer *segbuf = last, *n;
+	struct nilfs_segment_buffer *segbuf = last;
 	int ret;
 
-	list_for_each_entry_safe_continue(segbuf, n, &sci->sc_segbufs,
-					  sb_list) {
-		list_del_init(&segbuf->sb_list);
+	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
 		sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
 		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
 		WARN_ON(ret);
-		nilfs_segbuf_free(segbuf);
 	}
+	nilfs_truncate_logs(&sci->sc_segbufs, last);
 }
 
 
@@ -1569,7 +1517,7 @@
 							NULL);
 			WARN_ON(err); /* do not happen */
 		}
-		nilfs_segctor_clear_segment_buffers(sci);
+		nilfs_clear_logs(&sci->sc_segbufs);
 
 		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
 		if (unlikely(err))
@@ -1814,26 +1762,18 @@
 }
 
 static int nilfs_segctor_write(struct nilfs_sc_info *sci,
-			       struct backing_dev_info *bdi)
+			       struct the_nilfs *nilfs)
 {
 	struct nilfs_segment_buffer *segbuf;
-	struct nilfs_write_info wi;
-	int err, res;
-
-	wi.sb = sci->sc_super;
-	wi.bh_sr = sci->sc_super_root;
-	wi.bdi = bdi;
+	int ret = 0;
 
 	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
-		nilfs_segbuf_prepare_write(segbuf, &wi);
-		err = nilfs_segbuf_write(segbuf, &wi);
-
-		res = nilfs_segbuf_wait(segbuf, &wi);
-		err = err ? : res;
-		if (err)
-			return err;
+		ret = nilfs_segbuf_write(segbuf, nilfs);
+		if (ret)
+			break;
 	}
-	return 0;
+	list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
+	return ret;
 }
 
 static void __nilfs_end_page_io(struct page *page, int err)
@@ -1911,15 +1851,17 @@
 	}
 }
 
-static void nilfs_segctor_abort_write(struct nilfs_sc_info *sci,
-				      struct page *failed_page, int err)
+static void nilfs_abort_logs(struct list_head *logs, struct page *failed_page,
+			     struct buffer_head *bh_sr, int err)
 {
 	struct nilfs_segment_buffer *segbuf;
 	struct page *bd_page = NULL, *fs_page = NULL;
+	struct buffer_head *bh;
 
-	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
-		struct buffer_head *bh;
+	if (list_empty(logs))
+		return;
 
+	list_for_each_entry(segbuf, logs, sb_list) {
 		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
 				    b_assoc_buffers) {
 			if (bh->b_page != bd_page) {
@@ -1931,7 +1873,7 @@
 
 		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
 				    b_assoc_buffers) {
-			if (bh == sci->sc_super_root) {
+			if (bh == bh_sr) {
 				if (bh->b_page != bd_page) {
 					end_page_writeback(bd_page);
 					bd_page = bh->b_page;
@@ -1941,7 +1883,7 @@
 			if (bh->b_page != fs_page) {
 				nilfs_end_page_io(fs_page, err);
 				if (fs_page && fs_page == failed_page)
-					goto done;
+					return;
 				fs_page = bh->b_page;
 			}
 		}
@@ -1950,8 +1892,34 @@
 		end_page_writeback(bd_page);
 
 	nilfs_end_page_io(fs_page, err);
- done:
+}
+
+static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
+					     struct the_nilfs *nilfs, int err)
+{
+	LIST_HEAD(logs);
+	int ret;
+
+	list_splice_tail_init(&sci->sc_write_logs, &logs);
+	ret = nilfs_wait_on_logs(&logs);
+	if (ret)
+		nilfs_abort_logs(&logs, NULL, sci->sc_super_root, ret);
+
+	list_splice_tail_init(&sci->sc_segbufs, &logs);
+	nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
+	nilfs_free_incomplete_logs(&logs, nilfs);
 	nilfs_clear_copied_buffers(&sci->sc_copied_buffers, err);
+
+	if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
+		ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
+						sci->sc_freesegs,
+						sci->sc_nfreesegs,
+						NULL);
+		WARN_ON(ret); /* do not happen */
+	}
+
+	nilfs_destroy_logs(&logs);
+	sci->sc_super_root = NULL;
 }
 
 static void nilfs_set_next_segment(struct the_nilfs *nilfs,
@@ -1973,7 +1941,7 @@
 	struct the_nilfs *nilfs = sbi->s_nilfs;
 	int update_sr = (sci->sc_super_root != NULL);
 
-	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
+	list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
 		struct buffer_head *bh;
 
 		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
@@ -2046,7 +2014,7 @@
 
 	sci->sc_nblk_inc += sci->sc_nblk_this_inc;
 
-	segbuf = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
+	segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
 	nilfs_set_next_segment(nilfs, segbuf);
 
 	if (update_sr) {
@@ -2057,10 +2025,23 @@
 		clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
 		clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
 		set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
+		nilfs_segctor_clear_metadata_dirty(sci);
 	} else
 		clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
 }
 
+static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
+{
+	int ret;
+
+	ret = nilfs_wait_on_logs(&sci->sc_write_logs);
+	if (!ret) {
+		nilfs_segctor_complete_write(sci);
+		nilfs_destroy_logs(&sci->sc_write_logs);
+	}
+	return ret;
+}
+
 static int nilfs_segctor_check_in_files(struct nilfs_sc_info *sci,
 					struct nilfs_sb_info *sbi)
 {
@@ -2173,7 +2154,7 @@
 		/* Avoid empty segment */
 		if (sci->sc_stage.scnt == NILFS_ST_DONE &&
 		    NILFS_SEG_EMPTY(&sci->sc_curseg->sb_sum)) {
-			nilfs_segctor_end_construction(sci, nilfs, 1);
+			nilfs_segctor_abort_construction(sci, nilfs, 1);
 			goto out;
 		}
 
@@ -2187,7 +2168,7 @@
 		if (has_sr) {
 			err = nilfs_segctor_fill_in_checkpoint(sci);
 			if (unlikely(err))
-				goto failed_to_make_up;
+				goto failed_to_write;
 
 			nilfs_segctor_fill_in_super_root(sci, nilfs);
 		}
@@ -2195,42 +2176,46 @@
 
 		/* Write partial segments */
 		err = nilfs_segctor_prepare_write(sci, &failed_page);
-		if (unlikely(err))
+		if (err) {
+			nilfs_abort_logs(&sci->sc_segbufs, failed_page,
+					 sci->sc_super_root, err);
 			goto failed_to_write;
-
+		}
 		nilfs_segctor_fill_in_checksums(sci, nilfs->ns_crc_seed);
 
-		err = nilfs_segctor_write(sci, nilfs->ns_bdi);
+		err = nilfs_segctor_write(sci, nilfs);
 		if (unlikely(err))
 			goto failed_to_write;
 
-		nilfs_segctor_complete_write(sci);
-
-		/* Commit segments */
-		if (has_sr)
-			nilfs_segctor_clear_metadata_dirty(sci);
-
-		nilfs_segctor_end_construction(sci, nilfs, 0);
-
+		if (sci->sc_stage.scnt == NILFS_ST_DONE ||
+		    nilfs->ns_blocksize_bits != PAGE_CACHE_SHIFT) {
+			/*
+			 * At this point, we avoid double buffering
+			 * for blocksize < pagesize because page dirty
+			 * flag is turned off during write and dirty
+			 * buffers are not properly collected for
+			 * pages crossing over segments.
+			 */
+			err = nilfs_segctor_wait(sci);
+			if (err)
+				goto failed_to_write;
+		}
 	} while (sci->sc_stage.scnt != NILFS_ST_DONE);
 
+	sci->sc_super_root = NULL;
+
  out:
-	nilfs_segctor_destroy_segment_buffers(sci);
 	nilfs_segctor_check_out_files(sci, sbi);
 	return err;
 
  failed_to_write:
-	nilfs_segctor_abort_write(sci, failed_page, err);
-	nilfs_segctor_cancel_segusage(sci, nilfs->ns_sufile);
-
- failed_to_make_up:
 	if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
 		nilfs_redirty_inodes(&sci->sc_dirty_files);
 
  failed:
 	if (nilfs_doing_gc())
 		nilfs_redirty_inodes(&sci->sc_gc_inodes);
-	nilfs_segctor_end_construction(sci, nilfs, err);
+	nilfs_segctor_abort_construction(sci, nilfs, err);
 	goto out;
 }
 
@@ -2559,7 +2544,7 @@
 
 	sci->sc_freesegs = kbufs[4];
 	sci->sc_nfreesegs = argv[4].v_nmembs;
-	list_splice_init(&nilfs->ns_gc_inodes, sci->sc_gc_inodes.prev);
+	list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);
 
 	for (;;) {
 		nilfs_segctor_accept(sci, &req);
@@ -2788,6 +2773,7 @@
 	spin_lock_init(&sci->sc_state_lock);
 	INIT_LIST_HEAD(&sci->sc_dirty_files);
 	INIT_LIST_HEAD(&sci->sc_segbufs);
+	INIT_LIST_HEAD(&sci->sc_write_logs);
 	INIT_LIST_HEAD(&sci->sc_gc_inodes);
 	INIT_LIST_HEAD(&sci->sc_copied_buffers);
 
@@ -2855,6 +2841,7 @@
 	}
 
 	WARN_ON(!list_empty(&sci->sc_segbufs));
+	WARN_ON(!list_empty(&sci->sc_write_logs));
 
 	down_write(&sbi->s_nilfs->ns_segctor_sem);
 
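Note on the new write pipeline: logs are assembled on sci->sc_segbufs and,
once their I/O is submitted, moved to sci->sc_write_logs, so the next log
can be built while the previous one is still in flight.  A minimal sketch
of the handoff (names taken from the hunks above; illustrative, not part
of the patch):

	/* construction stage fills sci->sc_segbufs */
	err = nilfs_segctor_write(sci, nilfs);
	/* write() submits the BIOs, then moves everything in one step:
	 *   list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
	 */
	err = nilfs_segctor_wait(sci);	/* reaps completions on sc_write_logs */

The same split lets nilfs_segctor_abort_construction() treat queued and
in-flight logs uniformly by splicing both lists onto one scratch list.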
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
index 0d2a475..3d3ab2f 100644
--- a/fs/nilfs2/segment.h
+++ b/fs/nilfs2/segment.h
@@ -97,6 +97,7 @@
  * @sc_dsync_start: start byte offset of data pages
  * @sc_dsync_end: end byte offset of data pages (inclusive)
  * @sc_segbufs: List of segment buffers
+ * @sc_write_logs: List of segment buffers holding logs being written
  * @sc_segbuf_nblocks: Number of available blocks in segment buffers.
  * @sc_curseg: Current segment buffer
  * @sc_super_root: Pointer to the super root buffer
@@ -143,6 +144,7 @@
 
 	/* Segment buffers */
 	struct list_head	sc_segbufs;
+	struct list_head	sc_write_logs;
 	unsigned long		sc_segbuf_nblocks;
 	struct nilfs_segment_buffer *sc_curseg;
 	struct buffer_head     *sc_super_root;
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index 37994d4..b6c36d0 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -31,6 +31,16 @@
 #include "sufile.h"
 
 
+struct nilfs_sufile_info {
+	struct nilfs_mdt_info mi;
+	unsigned long ncleansegs;
+};
+
+static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
+{
+	return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
+}
+
 static inline unsigned long
 nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
 {
@@ -62,14 +72,6 @@
 		     max - curr + 1);
 }
 
-static inline struct nilfs_sufile_header *
-nilfs_sufile_block_get_header(const struct inode *sufile,
-			      struct buffer_head *bh,
-			      void *kaddr)
-{
-	return kaddr + bh_offset(bh);
-}
-
 static struct nilfs_segment_usage *
 nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
 				     struct buffer_head *bh, void *kaddr)
@@ -110,6 +112,15 @@
 }
 
 /**
+ * nilfs_sufile_get_ncleansegs - return the number of clean segments
+ * @sufile: inode of segment usage file
+ */
+unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
+{
+	return NILFS_SUI(sufile)->ncleansegs;
+}
+
+/**
  * nilfs_sufile_updatev - modify multiple segment usages at a time
  * @sufile: inode of segment usage file
  * @segnumv: array of segment numbers
@@ -270,7 +281,7 @@
 	if (ret < 0)
 		goto out_sem;
 	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
-	header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
+	header = kaddr + bh_offset(header_bh);
 	ncleansegs = le64_to_cpu(header->sh_ncleansegs);
 	last_alloc = le64_to_cpu(header->sh_last_alloc);
 	kunmap_atomic(kaddr, KM_USER0);
@@ -302,13 +313,13 @@
 			kunmap_atomic(kaddr, KM_USER0);
 
 			kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
-			header = nilfs_sufile_block_get_header(
-				sufile, header_bh, kaddr);
+			header = kaddr + bh_offset(header_bh);
 			le64_add_cpu(&header->sh_ncleansegs, -1);
 			le64_add_cpu(&header->sh_ndirtysegs, 1);
 			header->sh_last_alloc = cpu_to_le64(segnum);
 			kunmap_atomic(kaddr, KM_USER0);
 
+			NILFS_SUI(sufile)->ncleansegs--;
 			nilfs_mdt_mark_buffer_dirty(header_bh);
 			nilfs_mdt_mark_buffer_dirty(su_bh);
 			nilfs_mdt_mark_dirty(sufile);
@@ -351,6 +362,8 @@
 	kunmap_atomic(kaddr, KM_USER0);
 
 	nilfs_sufile_mod_counter(header_bh, -1, 1);
+	NILFS_SUI(sufile)->ncleansegs--;
+
 	nilfs_mdt_mark_buffer_dirty(su_bh);
 	nilfs_mdt_mark_dirty(sufile);
 }
@@ -380,6 +393,8 @@
 	kunmap_atomic(kaddr, KM_USER0);
 
 	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
+	NILFS_SUI(sufile)->ncleansegs -= clean;
+
 	nilfs_mdt_mark_buffer_dirty(su_bh);
 	nilfs_mdt_mark_dirty(sufile);
 }
@@ -409,58 +424,61 @@
 	nilfs_mdt_mark_buffer_dirty(su_bh);
 
 	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
+	NILFS_SUI(sufile)->ncleansegs++;
+
 	nilfs_mdt_mark_dirty(sufile);
 }
 
 /**
- * nilfs_sufile_get_segment_usage - get a segment usage
+ * nilfs_sufile_mark_dirty - mark the buffer holding a segment usage entry dirty
  * @sufile: inode of segment usage file
  * @segnum: segment number
- * @sup: pointer to segment usage
- * @bhp: pointer to buffer head
- *
- * Description: nilfs_sufile_get_segment_usage() acquires the segment usage
- * specified by @segnum.
- *
- * Return Value: On success, 0 is returned, and the segment usage and the
- * buffer head of the buffer on which the segment usage is located are stored
- * in the place pointed by @sup and @bhp, respectively. On error, one of the
- * following negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EINVAL - Invalid segment usage number.
  */
-int nilfs_sufile_get_segment_usage(struct inode *sufile, __u64 segnum,
-				   struct nilfs_segment_usage **sup,
-				   struct buffer_head **bhp)
+int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
+{
+	struct buffer_head *bh;
+	int ret;
+
+	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
+	if (!ret) {
+		nilfs_mdt_mark_buffer_dirty(bh);
+		nilfs_mdt_mark_dirty(sufile);
+		brelse(bh);
+	}
+	return ret;
+}
+
+/**
+ * nilfs_sufile_set_segment_usage - set usage of a segment
+ * @sufile: inode of segment usage file
+ * @segnum: segment number
+ * @nblocks: number of live blocks in the segment
+ * @modtime: modification time (optional; zero means no update)
+ */
+int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
+				   unsigned long nblocks, time_t modtime)
 {
 	struct buffer_head *bh;
 	struct nilfs_segment_usage *su;
 	void *kaddr;
 	int ret;
 
-	/* segnum is 0 origin */
-	if (segnum >= nilfs_sufile_get_nsegments(sufile))
-		return -EINVAL;
 	down_write(&NILFS_MDT(sufile)->mi_sem);
-	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1, &bh);
+	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
 	if (ret < 0)
 		goto out_sem;
-	kaddr = kmap(bh->b_page);
-	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
-	if (nilfs_segment_usage_error(su)) {
-		kunmap(bh->b_page);
-		brelse(bh);
-		ret = -EINVAL;
-		goto out_sem;
-	}
 
-	if (sup != NULL)
-		*sup = su;
-	*bhp = bh;
+	kaddr = kmap_atomic(bh->b_page, KM_USER0);
+	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
+	WARN_ON(nilfs_segment_usage_error(su));
+	if (modtime)
+		su->su_lastmod = cpu_to_le64(modtime);
+	su->su_nblocks = cpu_to_le32(nblocks);
+	kunmap_atomic(kaddr, KM_USER0);
+
+	nilfs_mdt_mark_buffer_dirty(bh);
+	nilfs_mdt_mark_dirty(sufile);
+	brelse(bh);
 
  out_sem:
 	up_write(&NILFS_MDT(sufile)->mi_sem);
@@ -468,23 +486,6 @@
 }
 
 /**
- * nilfs_sufile_put_segment_usage - put a segment usage
- * @sufile: inode of segment usage file
- * @segnum: segment number
- * @bh: buffer head
- *
- * Description: nilfs_sufile_put_segment_usage() releases the segment usage
- * specified by @segnum. @bh must be the buffer head which have been returned
- * by a previous call to nilfs_sufile_get_segment_usage() with @segnum.
- */
-void nilfs_sufile_put_segment_usage(struct inode *sufile, __u64 segnum,
-				    struct buffer_head *bh)
-{
-	kunmap(bh->b_page);
-	brelse(bh);
-}
-
-/**
  * nilfs_sufile_get_stat - get segment usage statistics
  * @sufile: inode of segment usage file
  * @stat: pointer to a structure of segment usage statistics
@@ -515,7 +516,7 @@
 		goto out_sem;
 
 	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
-	header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
+	header = kaddr + bh_offset(header_bh);
 	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
 	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
 	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
@@ -532,33 +533,6 @@
 	return ret;
 }
 
-/**
- * nilfs_sufile_get_ncleansegs - get the number of clean segments
- * @sufile: inode of segment usage file
- * @nsegsp: pointer to the number of clean segments
- *
- * Description: nilfs_sufile_get_ncleansegs() acquires the number of clean
- * segments.
- *
- * Return Value: On success, 0 is returned and the number of clean segments is
- * stored in the place pointed by @nsegsp. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- */
-int nilfs_sufile_get_ncleansegs(struct inode *sufile, unsigned long *nsegsp)
-{
-	struct nilfs_sustat sustat;
-	int ret;
-
-	ret = nilfs_sufile_get_stat(sufile, &sustat);
-	if (ret == 0)
-		*nsegsp = sustat.ss_ncleansegs;
-	return ret;
-}
-
 void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
 			       struct buffer_head *header_bh,
 			       struct buffer_head *su_bh)
@@ -577,8 +551,10 @@
 	nilfs_segment_usage_set_error(su);
 	kunmap_atomic(kaddr, KM_USER0);
 
-	if (suclean)
+	if (suclean) {
 		nilfs_sufile_mod_counter(header_bh, -1, 0);
+		NILFS_SUI(sufile)->ncleansegs--;
+	}
 	nilfs_mdt_mark_buffer_dirty(su_bh);
 	nilfs_mdt_mark_dirty(sufile);
 }
@@ -657,3 +633,48 @@
 	up_read(&NILFS_MDT(sufile)->mi_sem);
 	return ret;
 }
+
+/**
+ * nilfs_sufile_read - read sufile inode
+ * @sufile: sufile inode
+ * @raw_inode: on-disk sufile inode
+ */
+int nilfs_sufile_read(struct inode *sufile, struct nilfs_inode *raw_inode)
+{
+	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
+	struct buffer_head *header_bh;
+	struct nilfs_sufile_header *header;
+	void *kaddr;
+	int ret;
+
+	ret = nilfs_read_inode_common(sufile, raw_inode);
+	if (ret < 0)
+		return ret;
+
+	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
+	if (!ret) {
+		kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+		header = kaddr + bh_offset(header_bh);
+		sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
+		kunmap_atomic(kaddr, KM_USER0);
+		brelse(header_bh);
+	}
+	return ret;
+}
+
+/**
+ * nilfs_sufile_new - create sufile
+ * @nilfs: nilfs object
+ * @susize: size of a segment usage entry
+ */
+struct inode *nilfs_sufile_new(struct the_nilfs *nilfs, size_t susize)
+{
+	struct inode *sufile;
+
+	sufile = nilfs_mdt_new(nilfs, NULL, NILFS_SUFILE_INO,
+			       sizeof(struct nilfs_sufile_info));
+	if (sufile)
+		nilfs_mdt_set_entry_size(sufile, susize,
+					 sizeof(struct nilfs_sufile_header));
+	return sufile;
+}
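The clean-segment count is now cached in nilfs_sufile_info at
nilfs_sufile_read() time and adjusted at every alloc/free/cancel/set_error
site, which turns the accessor from a fallible header-block read into a
plain counter read:

	/* before: read the on-disk header every time (could fail) */
	err = nilfs_sufile_get_ncleansegs(sufile, &ncleansegs);

	/* after: return the in-memory counter (cannot fail) */
	ncleansegs = nilfs_sufile_get_ncleansegs(sufile);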
diff --git a/fs/nilfs2/sufile.h b/fs/nilfs2/sufile.h
index 0e99e5c..15163b8 100644
--- a/fs/nilfs2/sufile.h
+++ b/fs/nilfs2/sufile.h
@@ -34,14 +34,13 @@
 	return NILFS_MDT(sufile)->mi_nilfs->ns_nsegments;
 }
 
+unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile);
+
 int nilfs_sufile_alloc(struct inode *, __u64 *);
-int nilfs_sufile_get_segment_usage(struct inode *, __u64,
-				   struct nilfs_segment_usage **,
-				   struct buffer_head **);
-void nilfs_sufile_put_segment_usage(struct inode *, __u64,
-				    struct buffer_head *);
+int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum);
+int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
+				   unsigned long nblocks, time_t modtime);
 int nilfs_sufile_get_stat(struct inode *, struct nilfs_sustat *);
-int nilfs_sufile_get_ncleansegs(struct inode *, unsigned long *);
 ssize_t nilfs_sufile_get_suinfo(struct inode *, __u64, void *, unsigned,
 				size_t);
 
@@ -62,6 +61,9 @@
 void nilfs_sufile_do_set_error(struct inode *, __u64, struct buffer_head *,
 			       struct buffer_head *);
 
+int nilfs_sufile_read(struct inode *sufile, struct nilfs_inode *raw_inode);
+struct inode *nilfs_sufile_new(struct the_nilfs *nilfs, size_t susize);
+
 /**
  * nilfs_sufile_scrap - make a segment garbage
  * @sufile: inode of segment usage file
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 644e667..5403b3e 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -363,14 +363,10 @@
 	list_add(&sbi->s_list, &nilfs->ns_supers);
 	up_write(&nilfs->ns_super_sem);
 
-	sbi->s_ifile = nilfs_mdt_new(nilfs, sbi->s_super, NILFS_IFILE_INO);
+	sbi->s_ifile = nilfs_ifile_new(sbi, nilfs->ns_inode_size);
 	if (!sbi->s_ifile)
 		return -ENOMEM;
 
-	err = nilfs_palloc_init_blockgroup(sbi->s_ifile, nilfs->ns_inode_size);
-	if (unlikely(err))
-		goto failed;
-
 	down_read(&nilfs->ns_segctor_sem);
 	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, cno, 0, &raw_cp,
 					  &bh_cp);
@@ -411,7 +407,6 @@
 {
 	struct the_nilfs *nilfs = sbi->s_nilfs;
 
-	nilfs_mdt_clear(sbi->s_ifile);
 	nilfs_mdt_destroy(sbi->s_ifile);
 	sbi->s_ifile = NULL;
 	down_write(&nilfs->ns_super_sem);
@@ -419,22 +414,6 @@
 	up_write(&nilfs->ns_super_sem);
 }
 
-static int nilfs_mark_recovery_complete(struct nilfs_sb_info *sbi)
-{
-	struct the_nilfs *nilfs = sbi->s_nilfs;
-	int err = 0;
-
-	down_write(&nilfs->ns_sem);
-	if (!(nilfs->ns_mount_state & NILFS_VALID_FS)) {
-		nilfs->ns_mount_state |= NILFS_VALID_FS;
-		err = nilfs_commit_super(sbi, 1);
-		if (likely(!err))
-			printk(KERN_INFO "NILFS: recovery complete.\n");
-	}
-	up_write(&nilfs->ns_sem);
-	return err;
-}
-
 static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	struct super_block *sb = dentry->d_sb;
@@ -490,7 +469,7 @@
 	struct nilfs_sb_info *sbi = NILFS_SB(sb);
 
 	if (!nilfs_test_opt(sbi, BARRIER))
-		seq_printf(seq, ",barrier=off");
+		seq_printf(seq, ",nobarrier");
 	if (nilfs_test_opt(sbi, SNAPSHOT))
 		seq_printf(seq, ",cp=%llu",
 			   (unsigned long long int)sbi->s_snapshot_cno);
@@ -500,6 +479,8 @@
 		seq_printf(seq, ",errors=panic");
 	if (nilfs_test_opt(sbi, STRICT_ORDER))
 		seq_printf(seq, ",order=strict");
+	if (nilfs_test_opt(sbi, NORECOVERY))
+		seq_printf(seq, ",norecovery");
 
 	return 0;
 }
@@ -568,7 +549,7 @@
 
 enum {
 	Opt_err_cont, Opt_err_panic, Opt_err_ro,
-	Opt_barrier, Opt_snapshot, Opt_order,
+	Opt_nobarrier, Opt_snapshot, Opt_order, Opt_norecovery,
 	Opt_err,
 };
 
@@ -576,25 +557,13 @@
 	{Opt_err_cont, "errors=continue"},
 	{Opt_err_panic, "errors=panic"},
 	{Opt_err_ro, "errors=remount-ro"},
-	{Opt_barrier, "barrier=%s"},
+	{Opt_nobarrier, "nobarrier"},
 	{Opt_snapshot, "cp=%u"},
 	{Opt_order, "order=%s"},
+	{Opt_norecovery, "norecovery"},
 	{Opt_err, NULL}
 };
 
-static int match_bool(substring_t *s, int *result)
-{
-	int len = s->to - s->from;
-
-	if (strncmp(s->from, "on", len) == 0)
-		*result = 1;
-	else if (strncmp(s->from, "off", len) == 0)
-		*result = 0;
-	else
-		return 1;
-	return 0;
-}
-
 static int parse_options(char *options, struct super_block *sb)
 {
 	struct nilfs_sb_info *sbi = NILFS_SB(sb);
@@ -612,13 +581,8 @@
 
 		token = match_token(p, tokens, args);
 		switch (token) {
-		case Opt_barrier:
-			if (match_bool(&args[0], &option))
-				return 0;
-			if (option)
-				nilfs_set_opt(sbi, BARRIER);
-			else
-				nilfs_clear_opt(sbi, BARRIER);
+		case Opt_nobarrier:
+			nilfs_clear_opt(sbi, BARRIER);
 			break;
 		case Opt_order:
 			if (strcmp(args[0].from, "relaxed") == 0)
@@ -647,6 +611,9 @@
 			sbi->s_snapshot_cno = option;
 			nilfs_set_opt(sbi, SNAPSHOT);
 			break;
+		case Opt_norecovery:
+			nilfs_set_opt(sbi, NORECOVERY);
+			break;
 		default:
 			printk(KERN_ERR
 			       "NILFS: Unrecognized mount option \"%s\"\n", p);
@@ -672,9 +639,7 @@
 	int mnt_count = le16_to_cpu(sbp->s_mnt_count);
 
 	/* nilfs->sem must be locked by the caller. */
-	if (!(nilfs->ns_mount_state & NILFS_VALID_FS)) {
-		printk(KERN_WARNING "NILFS warning: mounting unchecked fs\n");
-	} else if (nilfs->ns_mount_state & NILFS_ERROR_FS) {
+	if (nilfs->ns_mount_state & NILFS_ERROR_FS) {
 		printk(KERN_WARNING
 		       "NILFS warning: mounting fs with errors\n");
 #if 0
@@ -782,11 +747,10 @@
 	sb->s_root = NULL;
 	sb->s_time_gran = 1;
 
-	if (!nilfs_loaded(nilfs)) {
-		err = load_nilfs(nilfs, sbi);
-		if (err)
-			goto failed_sbi;
-	}
+	err = load_nilfs(nilfs, sbi);
+	if (err)
+		goto failed_sbi;
+
 	cno = nilfs_last_cno(nilfs);
 
 	if (sb->s_flags & MS_RDONLY) {
@@ -854,12 +818,6 @@
 		up_write(&nilfs->ns_sem);
 	}
 
-	err = nilfs_mark_recovery_complete(sbi);
-	if (unlikely(err)) {
-		printk(KERN_ERR "NILFS: recovery failed.\n");
-		goto failed_root;
-	}
-
 	down_write(&nilfs->ns_super_sem);
 	if (!nilfs_test_opt(sbi, SNAPSHOT))
 		nilfs->ns_current = sbi;
@@ -867,10 +825,6 @@
 
 	return 0;
 
- failed_root:
-	dput(sb->s_root);
-	sb->s_root = NULL;
-
  failed_segctor:
 	nilfs_detach_segment_constructor(sbi);
 
@@ -915,6 +869,14 @@
 		goto restore_opts;
 	}
 
+	if (!nilfs_valid_fs(nilfs)) {
+		printk(KERN_WARNING "NILFS (device %s): couldn't "
+		       "remount because the filesystem is in an "
+		       "incomplete recovery state.\n", sb->s_id);
+		err = -EINVAL;
+		goto restore_opts;
+	}
+
 	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
 		goto out;
 	if (*flags & MS_RDONLY) {
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index ad391a8..6241e17 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -146,13 +146,9 @@
 
 	might_sleep();
 	if (nilfs_loaded(nilfs)) {
-		nilfs_mdt_clear(nilfs->ns_sufile);
 		nilfs_mdt_destroy(nilfs->ns_sufile);
-		nilfs_mdt_clear(nilfs->ns_cpfile);
 		nilfs_mdt_destroy(nilfs->ns_cpfile);
-		nilfs_mdt_clear(nilfs->ns_dat);
 		nilfs_mdt_destroy(nilfs->ns_dat);
-		/* XXX: how and when to clear nilfs->ns_gc_dat? */
 		nilfs_mdt_destroy(nilfs->ns_gc_dat);
 	}
 	if (nilfs_init(nilfs)) {
@@ -166,7 +162,6 @@
 static int nilfs_load_super_root(struct the_nilfs *nilfs,
 				 struct nilfs_sb_info *sbi, sector_t sr_block)
 {
-	static struct lock_class_key dat_lock_key;
 	struct buffer_head *bh_sr;
 	struct nilfs_super_root *raw_sr;
 	struct nilfs_super_block **sbp = nilfs->ns_sbp;
@@ -187,51 +182,36 @@
 	inode_size = nilfs->ns_inode_size;
 
 	err = -ENOMEM;
-	nilfs->ns_dat = nilfs_mdt_new(nilfs, NULL, NILFS_DAT_INO);
+	nilfs->ns_dat = nilfs_dat_new(nilfs, dat_entry_size);
 	if (unlikely(!nilfs->ns_dat))
 		goto failed;
 
-	nilfs->ns_gc_dat = nilfs_mdt_new(nilfs, NULL, NILFS_DAT_INO);
+	nilfs->ns_gc_dat = nilfs_dat_new(nilfs, dat_entry_size);
 	if (unlikely(!nilfs->ns_gc_dat))
 		goto failed_dat;
 
-	nilfs->ns_cpfile = nilfs_mdt_new(nilfs, NULL, NILFS_CPFILE_INO);
+	nilfs->ns_cpfile = nilfs_cpfile_new(nilfs, checkpoint_size);
 	if (unlikely(!nilfs->ns_cpfile))
 		goto failed_gc_dat;
 
-	nilfs->ns_sufile = nilfs_mdt_new(nilfs, NULL, NILFS_SUFILE_INO);
+	nilfs->ns_sufile = nilfs_sufile_new(nilfs, segment_usage_size);
 	if (unlikely(!nilfs->ns_sufile))
 		goto failed_cpfile;
 
-	err = nilfs_palloc_init_blockgroup(nilfs->ns_dat, dat_entry_size);
-	if (unlikely(err))
-		goto failed_sufile;
-
-	err = nilfs_palloc_init_blockgroup(nilfs->ns_gc_dat, dat_entry_size);
-	if (unlikely(err))
-		goto failed_sufile;
-
-	lockdep_set_class(&NILFS_MDT(nilfs->ns_dat)->mi_sem, &dat_lock_key);
-	lockdep_set_class(&NILFS_MDT(nilfs->ns_gc_dat)->mi_sem, &dat_lock_key);
-
 	nilfs_mdt_set_shadow(nilfs->ns_dat, nilfs->ns_gc_dat);
-	nilfs_mdt_set_entry_size(nilfs->ns_cpfile, checkpoint_size,
-				 sizeof(struct nilfs_cpfile_header));
-	nilfs_mdt_set_entry_size(nilfs->ns_sufile, segment_usage_size,
-				 sizeof(struct nilfs_sufile_header));
 
-	err = nilfs_mdt_read_inode_direct(
-		nilfs->ns_dat, bh_sr, NILFS_SR_DAT_OFFSET(inode_size));
+	err = nilfs_dat_read(nilfs->ns_dat, (void *)bh_sr->b_data +
+			     NILFS_SR_DAT_OFFSET(inode_size));
 	if (unlikely(err))
 		goto failed_sufile;
 
-	err = nilfs_mdt_read_inode_direct(
-		nilfs->ns_cpfile, bh_sr, NILFS_SR_CPFILE_OFFSET(inode_size));
+	err = nilfs_cpfile_read(nilfs->ns_cpfile, (void *)bh_sr->b_data +
+				NILFS_SR_CPFILE_OFFSET(inode_size));
 	if (unlikely(err))
 		goto failed_sufile;
 
-	err = nilfs_mdt_read_inode_direct(
-		nilfs->ns_sufile, bh_sr, NILFS_SR_SUFILE_OFFSET(inode_size));
+	err = nilfs_sufile_read(nilfs->ns_sufile, (void *)bh_sr->b_data +
+				NILFS_SR_SUFILE_OFFSET(inode_size));
 	if (unlikely(err))
 		goto failed_sufile;
 
@@ -281,29 +261,30 @@
 	struct nilfs_recovery_info ri;
 	unsigned int s_flags = sbi->s_super->s_flags;
 	int really_read_only = bdev_read_only(nilfs->ns_bdev);
-	unsigned valid_fs;
-	int err = 0;
+	int valid_fs = nilfs_valid_fs(nilfs);
+	int err;
+
+	if (nilfs_loaded(nilfs)) {
+		if (valid_fs ||
+		    ((s_flags & MS_RDONLY) && nilfs_test_opt(sbi, NORECOVERY)))
+			return 0;
+		printk(KERN_ERR "NILFS: the filesystem is in an incomplete "
+		       "recovery state.\n");
+		return -EINVAL;
+	}
+
+	if (!valid_fs) {
+		printk(KERN_WARNING "NILFS warning: mounting unchecked fs\n");
+		if (s_flags & MS_RDONLY) {
+			printk(KERN_INFO "NILFS: INFO: recovery "
+			       "required for readonly filesystem.\n");
+			printk(KERN_INFO "NILFS: write access will "
+			       "be enabled during recovery.\n");
+		}
+	}
 
 	nilfs_init_recovery_info(&ri);
 
-	down_write(&nilfs->ns_sem);
-	valid_fs = (nilfs->ns_mount_state & NILFS_VALID_FS);
-	up_write(&nilfs->ns_sem);
-
-	if (!valid_fs && (s_flags & MS_RDONLY)) {
-		printk(KERN_INFO "NILFS: INFO: recovery "
-		       "required for readonly filesystem.\n");
-		if (really_read_only) {
-			printk(KERN_ERR "NILFS: write access "
-			       "unavailable, cannot proceed.\n");
-			err = -EROFS;
-			goto failed;
-		}
-		printk(KERN_INFO "NILFS: write access will "
-		       "be enabled during recovery.\n");
-		sbi->s_super->s_flags &= ~MS_RDONLY;
-	}
-
 	err = nilfs_search_super_root(nilfs, sbi, &ri);
 	if (unlikely(err)) {
 		printk(KERN_ERR "NILFS: error searching super root.\n");
@@ -316,19 +297,56 @@
 		goto failed;
 	}
 
-	if (!valid_fs) {
-		err = nilfs_recover_logical_segments(nilfs, sbi, &ri);
-		if (unlikely(err)) {
-			nilfs_mdt_destroy(nilfs->ns_cpfile);
-			nilfs_mdt_destroy(nilfs->ns_sufile);
-			nilfs_mdt_destroy(nilfs->ns_dat);
-			goto failed;
+	if (valid_fs)
+		goto skip_recovery;
+
+	if (s_flags & MS_RDONLY) {
+		if (nilfs_test_opt(sbi, NORECOVERY)) {
+			printk(KERN_INFO "NILFS: norecovery option specified. "
+			       "skipping roll-forward recovery\n");
+			goto skip_recovery;
 		}
-		if (ri.ri_need_recovery == NILFS_RECOVERY_SR_UPDATED)
-			sbi->s_super->s_dirt = 1;
+		if (really_read_only) {
+			printk(KERN_ERR "NILFS: write access "
+			       "unavailable, cannot proceed.\n");
+			err = -EROFS;
+			goto failed_unload;
+		}
+		sbi->s_super->s_flags &= ~MS_RDONLY;
+	} else if (nilfs_test_opt(sbi, NORECOVERY)) {
+		printk(KERN_ERR "NILFS: recovery cancelled because norecovery "
+		       "option was specified for a read/write mount\n");
+		err = -EINVAL;
+		goto failed_unload;
 	}
 
+	err = nilfs_recover_logical_segments(nilfs, sbi, &ri);
+	if (err)
+		goto failed_unload;
+
+	down_write(&nilfs->ns_sem);
+	nilfs->ns_mount_state |= NILFS_VALID_FS;
+	nilfs->ns_sbp[0]->s_state = cpu_to_le16(nilfs->ns_mount_state);
+	err = nilfs_commit_super(sbi, 1);
+	up_write(&nilfs->ns_sem);
+
+	if (err) {
+		printk(KERN_ERR "NILFS: failed to update super block. "
+		       "recovery unfinished.\n");
+		goto failed_unload;
+	}
+	printk(KERN_INFO "NILFS: recovery complete.\n");
+
+ skip_recovery:
 	set_nilfs_loaded(nilfs);
+	nilfs_clear_recovery_info(&ri);
+	sbi->s_super->s_flags = s_flags;
+	return 0;
+
+ failed_unload:
+	nilfs_mdt_destroy(nilfs->ns_cpfile);
+	nilfs_mdt_destroy(nilfs->ns_sufile);
+	nilfs_mdt_destroy(nilfs->ns_dat);
 
  failed:
 	nilfs_clear_recovery_info(&ri);
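The resulting load_nilfs() decision table (derived from the hunk above):

	valid FS                  -> mount, nothing to recover
	dirty FS, r/w mount       -> roll-forward recovery, then set
	                             NILFS_VALID_FS and commit the super block
	dirty FS, r/o mount       -> write access temporarily enabled for
	                             recovery; -EROFS if the device itself
	                             is read-only
	dirty FS, r/o, norecovery -> mounted without recovery, unreplayed
	                             logs left in place
	dirty FS, r/w, norecovery -> refused with -EINVAL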
@@ -632,30 +650,23 @@
 {
 	struct inode *dat = nilfs_dat_inode(nilfs);
 	unsigned long ncleansegs;
-	int err;
 
 	down_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
-	err = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile, &ncleansegs);
+	ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile);
 	up_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
-	if (likely(!err))
-		*nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment;
-	return err;
+	*nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment;
+	return 0;
 }
 
 int nilfs_near_disk_full(struct the_nilfs *nilfs)
 {
-	struct inode *sufile = nilfs->ns_sufile;
 	unsigned long ncleansegs, nincsegs;
-	int ret;
 
-	ret = nilfs_sufile_get_ncleansegs(sufile, &ncleansegs);
-	if (likely(!ret)) {
-		nincsegs = atomic_read(&nilfs->ns_ndirtyblks) /
-			nilfs->ns_blocks_per_segment + 1;
-		if (ncleansegs <= nilfs->ns_nrsvsegs + nincsegs)
-			ret++;
-	}
-	return ret;
+	ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile);
+	nincsegs = atomic_read(&nilfs->ns_ndirtyblks) /
+		nilfs->ns_blocks_per_segment + 1;
+
+	return ncleansegs <= nilfs->ns_nrsvsegs + nincsegs;
 }
 
 /**
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index 20abd55..589786e 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -258,6 +258,16 @@
 		kfree(sbi);
 }
 
+static inline int nilfs_valid_fs(struct the_nilfs *nilfs)
+{
+	unsigned valid_fs;
+
+	down_read(&nilfs->ns_sem);
+	valid_fs = (nilfs->ns_mount_state & NILFS_VALID_FS);
+	up_read(&nilfs->ns_sem);
+	return valid_fs;
+}
+
 static inline void
 nilfs_get_segment_range(struct the_nilfs *nilfs, __u64 segnum,
 			sector_t *seg_start, sector_t *seg_end)
diff --git a/include/acpi/acpi_hest.h b/include/acpi/acpi_hest.h
new file mode 100644
index 0000000..63194d0
--- /dev/null
+++ b/include/acpi/acpi_hest.h
@@ -0,0 +1,12 @@
+#ifndef __ACPI_HEST_H
+#define __ACPI_HEST_H
+
+#include <linux/pci.h>
+
+#ifdef CONFIG_ACPI
+extern int acpi_hest_firmware_first_pci(struct pci_dev *pci);
+#else
+static inline int acpi_hest_firmware_first_pci(struct pci_dev *pci) { return 0; }
+#endif
+
+#endif
diff --git a/include/drm/Kbuild b/include/drm/Kbuild
index b940fdf..cfa6af4 100644
--- a/include/drm/Kbuild
+++ b/include/drm/Kbuild
@@ -8,3 +8,4 @@
 unifdef-y += sis_drm.h
 unifdef-y += savage_drm.h
 unifdef-y += via_drm.h
+unifdef-y += nouveau_drm.h
diff --git a/include/drm/drm.h b/include/drm/drm.h
index 7cb50bd..e3f46e0 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -36,17 +36,27 @@
 #ifndef _DRM_H_
 #define _DRM_H_
 
-#include <linux/types.h>
-#include <asm/ioctl.h>		/* For _IO* macros */
-#define DRM_IOCTL_NR(n)		_IOC_NR(n)
-#define DRM_IOC_VOID		_IOC_NONE
-#define DRM_IOC_READ		_IOC_READ
-#define DRM_IOC_WRITE		_IOC_WRITE
-#define DRM_IOC_READWRITE	_IOC_READ|_IOC_WRITE
-#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
+#if defined(__linux__)
 
-#define DRM_MAJOR       226
-#define DRM_MAX_MINOR   15
+#include <linux/types.h>
+#include <asm/ioctl.h>
+typedef unsigned int drm_handle_t;
+
+#else /* One of the BSDs */
+
+#include <sys/ioccom.h>
+#include <sys/types.h>
+typedef int8_t   __s8;
+typedef uint8_t  __u8;
+typedef int16_t  __s16;
+typedef uint16_t __u16;
+typedef int32_t  __s32;
+typedef uint32_t __u32;
+typedef int64_t  __s64;
+typedef uint64_t __u64;
+typedef unsigned long drm_handle_t;
+
+#endif
 
 #define DRM_NAME	"drm"	  /**< Name in kernel, /dev, and /proc */
 #define DRM_MIN_ORDER	5	  /**< At least 2^5 bytes = 32 bytes */
@@ -59,7 +69,6 @@
 #define _DRM_LOCK_IS_CONT(lock)	   ((lock) & _DRM_LOCK_CONT)
 #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
 
-typedef unsigned int drm_handle_t;
 typedef unsigned int drm_context_t;
 typedef unsigned int drm_drawable_t;
 typedef unsigned int drm_magic_t;
@@ -454,6 +463,7 @@
 enum drm_vblank_seq_type {
 	_DRM_VBLANK_ABSOLUTE = 0x0,	/**< Wait for specific vblank sequence number */
 	_DRM_VBLANK_RELATIVE = 0x1,	/**< Wait for given number of vblanks */
+	_DRM_VBLANK_EVENT = 0x4000000,   /**< Send event instead of blocking */
 	_DRM_VBLANK_FLIP = 0x8000000,   /**< Scheduled buffer swap should flip */
 	_DRM_VBLANK_NEXTONMISS = 0x10000000,	/**< If missed, wait for next vblank */
 	_DRM_VBLANK_SECONDARY = 0x20000000,	/**< Secondary display controller */
@@ -461,8 +471,8 @@
 };
 
 #define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
-#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
-				_DRM_VBLANK_NEXTONMISS)
+#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
+				_DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)
 
 struct drm_wait_vblank_request {
 	enum drm_vblank_seq_type type;
@@ -686,6 +696,8 @@
 #define DRM_IOCTL_MODE_GETFB		DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
 #define DRM_IOCTL_MODE_ADDFB		DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
 #define DRM_IOCTL_MODE_RMFB		DRM_IOWR(0xAF, unsigned int)
+#define DRM_IOCTL_MODE_PAGE_FLIP	DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
+#define DRM_IOCTL_MODE_DIRTYFB		DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
 
 /**
  * Device specific ioctls should only be in their respective headers
@@ -698,6 +710,35 @@
 #define DRM_COMMAND_BASE                0x40
 #define DRM_COMMAND_END			0xA0
 
+/**
+ * Header for events written back to userspace on the drm fd.  The
+ * type defines the type of event, the length specifies the total
+ * length of the event (including the header), and user_data is
+ * typically a 64 bit value passed with the ioctl that triggered the
+ * event.  A read on the drm fd will always only return complete
+ * events, that is, if for example the read buffer is 100 bytes, and
+ * there are two 64 byte events pending, only one will be returned.
+ *
+ * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
+ * up are chipset specific.
+ */
+struct drm_event {
+	__u32 type;
+	__u32 length;
+};
+
+#define DRM_EVENT_VBLANK 0x01
+#define DRM_EVENT_FLIP_COMPLETE 0x02
+
+struct drm_event_vblank {
+	struct drm_event base;
+	__u64 user_data;
+	__u32 tv_sec;
+	__u32 tv_usec;
+	__u32 sequence;
+	__u32 reserved;
+};
+
 /* typedef area */
 #ifndef __KERNEL__
 typedef struct drm_clip_rect drm_clip_rect_t;
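A minimal userspace sketch of draining these events (illustrative, with
error handling trimmed; fd is assumed to be an open DRM device file
descriptor):

	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>
	#include <drm/drm.h>

	static void drain_drm_events(int fd)
	{
		char buf[1024];
		ssize_t len, i = 0;

		len = read(fd, buf, sizeof(buf));  /* whole events only */
		while (i < len) {
			struct drm_event *e = (struct drm_event *) &buf[i];

			if (e->type == DRM_EVENT_VBLANK ||
			    e->type == DRM_EVENT_FLIP_COMPLETE) {
				struct drm_event_vblank ev;

				memcpy(&ev, e, sizeof(ev));
				/* consume ev.sequence, ev.tv_sec/tv_usec
				 * and ev.user_data here */
			}
			i += e->length;	/* length includes the header */
		}
	}

Stepping by e->length is what keeps the format forward compatible: a
client can skip over event types it does not understand.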
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index c8e64bb..19ef8eb 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -245,16 +245,6 @@
 
 #endif
 
-#define DRM_PROC_LIMIT (PAGE_SIZE-80)
-
-#define DRM_PROC_PRINT(fmt, arg...)					\
-   len += sprintf(&buf[len], fmt , ##arg);				\
-   if (len > DRM_PROC_LIMIT) { *eof = 1; return len - offset; }
-
-#define DRM_PROC_PRINT_RET(ret, fmt, arg...)				\
-   len += sprintf(&buf[len], fmt , ##arg);				\
-   if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; }
-
 /*@}*/
 
 /***********************************************************************/
@@ -265,19 +255,8 @@
 
 #define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
 #define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
-#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist)
 
 #define DRM_IF_VERSION(maj, min) (maj << 16 | min)
-/**
- * Get the private SAREA mapping.
- *
- * \param _dev DRM device.
- * \param _ctx context number.
- * \param _map output mapping.
- */
-#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do {	\
-	(_map) = (_dev)->context_sareas[_ctx];		\
-} while(0)
 
 /**
  * Test that the hardware lock is held by the caller, returning otherwise.
@@ -297,18 +276,6 @@
 } while (0)
 
 /**
- * Copy and IOCTL return string to user space
- */
-#define DRM_COPY( name, value )						\
-	len = strlen( value );						\
-	if ( len > name##_len ) len = name##_len;			\
-	name##_len = strlen( value );					\
-	if ( len && name ) {						\
-		if ( copy_to_user( name, value, len ) )			\
-			return -EFAULT;					\
-	}
-
-/**
  * Ioctl function type.
  *
  * \param inode device inode.
@@ -322,6 +289,9 @@
 typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
 			       unsigned long arg);
 
+#define DRM_IOCTL_NR(n)                _IOC_NR(n)
+#define DRM_MAJOR       226
+
 #define DRM_AUTH	0x1
 #define	DRM_MASTER	0x2
 #define DRM_ROOT_ONLY	0x4
@@ -426,6 +396,14 @@
 	struct drm_freelist freelist;
 };
 
+/* Event queued up for userspace to read */
+struct drm_pending_event {
+	struct drm_event *event;
+	struct list_head link;
+	struct drm_file *file_priv;
+	void (*destroy)(struct drm_pending_event *event);
+};
+
 /** File private data */
 struct drm_file {
 	int authenticated;
@@ -449,6 +427,10 @@
 	struct drm_master *master; /* master this node is currently associated with
 				      N.B. not always minor->master */
 	struct list_head fbs;
+
+	wait_queue_head_t event_wait;
+	struct list_head event_list;
+	int event_space;
 };
 
 /** Wait queue */
@@ -795,6 +777,15 @@
 	/* Master routines */
 	int (*master_create)(struct drm_device *dev, struct drm_master *master);
 	void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
+	/**
+	 * master_set is called whenever the minor master is set.
+	 * master_drop is called whenever the minor master is dropped.
+	 */
+
+	int (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
+			  bool from_open);
+	void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv,
+			    bool from_release);
 
 	int (*proc_init)(struct drm_minor *minor);
 	void (*proc_cleanup)(struct drm_minor *minor);
@@ -900,6 +891,12 @@
 	struct drm_mode_group mode_group;
 };
 
+struct drm_pending_vblank_event {
+	struct drm_pending_event base;
+	int pipe;
+	struct drm_event_vblank event;
+};
+
 /**
  * DRM device structure. This structure represent a complete card that
  * may contain multiple heads.
@@ -999,6 +996,12 @@
 
 	u32 max_vblank_count;           /**< size of vblank counter register */
 
+	/**
+	 * List of events
+	 */
+	struct list_head vblank_event_list;
+	spinlock_t event_lock;
+
 	/*@} */
 	cycles_t ctx_start;
 	cycles_t lck_start;
@@ -1135,6 +1138,8 @@
 extern int drm_open(struct inode *inode, struct file *filp);
 extern int drm_stub_open(struct inode *inode, struct file *filp);
 extern int drm_fasync(int fd, struct file *filp, int on);
+extern ssize_t drm_read(struct file *filp, char __user *buffer,
+			size_t count, loff_t *offset);
 extern int drm_release(struct inode *inode, struct file *filp);
 
 				/* Mapping support (drm_vm.h) */
@@ -1295,6 +1300,7 @@
 extern void drm_handle_vblank(struct drm_device *dev, int crtc);
 extern int drm_vblank_get(struct drm_device *dev, int crtc);
 extern void drm_vblank_put(struct drm_device *dev, int crtc);
+extern void drm_vblank_off(struct drm_device *dev, int crtc);
 extern void drm_vblank_cleanup(struct drm_device *dev);
 /* Modesetting support */
 extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
@@ -1519,16 +1525,29 @@
 
 static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
 {
-	if (size * nmemb <= PAGE_SIZE)
-	    return kcalloc(nmemb, size, GFP_KERNEL);
-
 	if (size != 0 && nmemb > ULONG_MAX / size)
 		return NULL;
 
+	if (size * nmemb <= PAGE_SIZE)
+	    return kcalloc(nmemb, size, GFP_KERNEL);
+
 	return __vmalloc(size * nmemb,
 			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
 }
 
+/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
+static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
+{
+	if (size != 0 && nmemb > ULONG_MAX / size)
+		return NULL;
+
+	if (size * nmemb <= PAGE_SIZE)
+	    return kmalloc(nmemb * size, GFP_KERNEL);
+
+	return __vmalloc(size * nmemb,
+			 GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+}
+
 static __inline void drm_free_large(void *ptr)
 {
 	if (!is_vmalloc_addr(ptr))
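drm_malloc_ab() mirrors drm_calloc_large() minus the zeroing: small
requests go through kmalloc(), large ones through __vmalloc(), and
drm_free_large() dispatches on is_vmalloc_addr() so callers never need to
remember which path was taken.  A sketch of the intended pairing (the
reloc buffer is a made-up example):

	u32 *relocs = drm_malloc_ab(count, sizeof(*relocs));
	if (!relocs)
		return -ENOMEM;
	/* ... fill and consume relocs, initializing every element ... */
	drm_free_large(relocs);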
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index b69347b..fdf43ab 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -123,7 +123,7 @@
 	int type;
 
 	/* Proposed mode values */
-	int clock;
+	int clock;		/* in kHz */
 	int hdisplay;
 	int hsync_start;
 	int hsync_end;
@@ -164,8 +164,8 @@
 	int *private;
 	int private_flags;
 
-	int vrefresh;
-	float hsync;
+	int vrefresh;		/* in Hz */
+	int hsync;		/* in kHz */
 };
 
 enum drm_connector_status {
@@ -242,6 +242,21 @@
 	int (*create_handle)(struct drm_framebuffer *fb,
 			     struct drm_file *file_priv,
 			     unsigned int *handle);
+	/**
+	 * Optional callback for the dirty fb ioctl.
+	 *
+	 * Userspace can notify the driver via this callback
+	 * that an area of the framebuffer has changed and should
+	 * be flushed to the display hardware.
+	 *
+	 * See the documentation of struct drm_mode_fb_dirty_cmd in
+	 * drm_mode.h for more information, as the semantics and
+	 * arguments of this callback map one to one onto that
+	 * structure.
+	 */
+	int (*dirty)(struct drm_framebuffer *framebuffer, unsigned flags,
+		     unsigned color, struct drm_clip_rect *clips,
+		     unsigned num_clips);
 };
 
 struct drm_framebuffer {
@@ -256,7 +271,7 @@
 	unsigned int depth;
 	int bits_per_pixel;
 	int flags;
-	void *fbdev;
+	struct fb_info *fbdev;
 	u32 pseudo_palette[17];
 	struct list_head filp_head;
 	/* if you are using the helper */
@@ -290,6 +305,7 @@
 struct drm_crtc;
 struct drm_connector;
 struct drm_encoder;
+struct drm_pending_vblank_event;
 
 /**
  * drm_crtc_funcs - control CRTCs for a given device
@@ -333,6 +349,19 @@
 	void (*destroy)(struct drm_crtc *crtc);
 
 	int (*set_config)(struct drm_mode_set *set);
+
+	/*
+	 * Flip to the given framebuffer.  This implements the page
+	 * flip ioctl described in drm_mode.h; specifically, the
+	 * implementation must return immediately and block all
+	 * rendering to the current fb until the flip has completed.
+	 * If userspace set the event flag in the ioctl, the event
+	 * argument will point to an event to send back when the flip
+	 * completes; otherwise it will be NULL.
+	 */
+	int (*page_flip)(struct drm_crtc *crtc,
+			 struct drm_framebuffer *fb,
+			 struct drm_pending_vblank_event *event);
 };
 
 /**
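On the userspace side, an illustrative sketch of requesting a flip with
event delivery (struct drm_mode_crtc_page_flip and DRM_MODE_PAGE_FLIP_EVENT
live in the drm_mode.h part of this series, not shown here; crtc_id,
new_fb_id and ctx are assumptions):

	struct drm_mode_crtc_page_flip flip = {
		.crtc_id   = crtc_id,
		.fb_id     = new_fb_id,
		.flags     = DRM_MODE_PAGE_FLIP_EVENT,
		.user_data = (uint64_t)(uintptr_t) ctx,
	};

	if (ioctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip) == 0) {
		/* a DRM_EVENT_FLIP_COMPLETE event carrying user_data will
		 * arrive on fd; see the read() sketch after the drm.h
		 * diff above */
	}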
@@ -596,6 +625,7 @@
 	/* Optional properties */
 	struct drm_property *scaling_mode_property;
 	struct drm_property *dithering_mode_property;
+	struct drm_property *dirty_info_property;
 };
 
 #define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
@@ -667,6 +697,7 @@
 extern void drm_mode_prune_invalid(struct drm_device *dev,
 				   struct list_head *mode_list, bool verbose);
 extern void drm_mode_sort(struct list_head *mode_list);
+extern int drm_mode_hsync(struct drm_display_mode *mode);
 extern int drm_mode_vrefresh(struct drm_display_mode *mode);
 extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
 				  int adjust_flags);
@@ -703,6 +734,7 @@
 				     char *formats[]);
 extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
 extern int drm_mode_create_dithering_property(struct drm_device *dev);
+extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
 extern char *drm_get_encoder_name(struct drm_encoder *encoder);
 
 extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
@@ -711,7 +743,8 @@
 					   struct drm_encoder *encoder);
 extern bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
 					 int gamma_size);
-extern void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type);
+extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+		uint32_t id, uint32_t type);
 /* IOCTLs */
 extern int drm_mode_getresources(struct drm_device *dev,
 				 void *data, struct drm_file *file_priv);
@@ -730,6 +763,8 @@
 			 void *data, struct drm_file *file_priv);
 extern int drm_mode_getfb(struct drm_device *dev,
 			  void *data, struct drm_file *file_priv);
+extern int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+				  void *data, struct drm_file *file_priv);
 extern int drm_mode_addmode_ioctl(struct drm_device *dev,
 				  void *data, struct drm_file *file_priv);
 extern int drm_mode_rmmode_ioctl(struct drm_device *dev,
@@ -756,6 +791,8 @@
 extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
 				    void *data, struct drm_file *file_priv);
 extern bool drm_detect_hdmi_monitor(struct edid *edid);
+extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
+				    void *data, struct drm_file *file_priv);
 extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
 				int hdisplay, int vdisplay, int vrefresh,
 				bool reduced, bool interlaced, bool margins);
diff --git a/drivers/gpu/drm/i915/intel_dp.h b/include/drm/drm_dp_helper.h
similarity index 70%
rename from drivers/gpu/drm/i915/intel_dp.h
rename to include/drm/drm_dp_helper.h
index 2b38054..a49e791 100644
--- a/drivers/gpu/drm/i915/intel_dp.h
+++ b/include/drm/drm_dp_helper.h
@@ -20,8 +20,8 @@
  * OF THIS SOFTWARE.
  */
 
-#ifndef _INTEL_DP_H_
-#define _INTEL_DP_H_
+#ifndef _DRM_DP_HELPER_H_
+#define _DRM_DP_HELPER_H_
 
 /* From the VESA DisplayPort spec */
 
@@ -43,16 +43,41 @@
 #define AUX_I2C_REPLY_MASK	(0x3 << 6)
 
 /* AUX CH addresses */
-#define	DP_LINK_BW_SET		0x100
+/* DPCD */
+#define DP_DPCD_REV                         0x000
+
+#define DP_MAX_LINK_RATE                    0x001
+
+#define DP_MAX_LANE_COUNT                   0x002
+# define DP_MAX_LANE_COUNT_MASK		    0x1f
+# define DP_ENHANCED_FRAME_CAP		    (1 << 7)
+
+#define DP_MAX_DOWNSPREAD                   0x003
+# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING  (1 << 6)
+
+#define DP_NORP                             0x004
+
+#define DP_DOWNSTREAMPORT_PRESENT           0x005
+# define DP_DWN_STRM_PORT_PRESENT           (1 << 0)
+# define DP_DWN_STRM_PORT_TYPE_MASK         0x06
+/* 00b = DisplayPort */
+/* 01b = Analog */
+/* 10b = TMDS or HDMI */
+/* 11b = Other */
+# define DP_FORMAT_CONVERSION               (1 << 3)
+
+#define DP_MAIN_LINK_CHANNEL_CODING         0x006
+
+/* link configuration */
+#define	DP_LINK_BW_SET		            0x100
 # define DP_LINK_BW_1_62		    0x06
 # define DP_LINK_BW_2_7			    0x0a
 
-#define DP_LANE_COUNT_SET	0x101
+#define DP_LANE_COUNT_SET	            0x101
 # define DP_LANE_COUNT_MASK		    0x0f
 # define DP_LANE_COUNT_ENHANCED_FRAME_EN    (1 << 7)
 
-#define DP_TRAINING_PATTERN_SET	0x102
-
+#define DP_TRAINING_PATTERN_SET	            0x102
 # define DP_TRAINING_PATTERN_DISABLE	    0
 # define DP_TRAINING_PATTERN_1		    1
 # define DP_TRAINING_PATTERN_2		    2
@@ -102,11 +127,14 @@
 
 #define DP_LANE0_1_STATUS		    0x202
 #define DP_LANE2_3_STATUS		    0x203
-
 # define DP_LANE_CR_DONE		    (1 << 0)
 # define DP_LANE_CHANNEL_EQ_DONE	    (1 << 1)
 # define DP_LANE_SYMBOL_LOCKED		    (1 << 2)
 
+#define DP_CHANNEL_EQ_BITS (DP_LANE_CR_DONE |		\
+			    DP_LANE_CHANNEL_EQ_DONE |	\
+			    DP_LANE_SYMBOL_LOCKED)
+
 #define DP_LANE_ALIGN_STATUS_UPDATED	    0x204
 
 #define DP_INTERLANE_ALIGN_DONE		    (1 << 0)
@@ -120,25 +148,33 @@
 
 #define DP_ADJUST_REQUEST_LANE0_1	    0x206
 #define DP_ADJUST_REQUEST_LANE2_3	    0x207
+# define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK  0x03
+# define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
+# define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK   0x0c
+# define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT  2
+# define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK  0x30
+# define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
+# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK   0xc0
+# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT  6
 
-#define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK  0x03
-#define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
-#define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK   0x0c
-#define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT  2
-#define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK  0x30
-#define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
-#define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK   0xc0
-#define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT  6
+#define DP_SET_POWER                        0x600
+# define DP_SET_POWER_D0                    0x1
+# define DP_SET_POWER_D3                    0x2
+
+#define MODE_I2C_START	1
+#define MODE_I2C_WRITE	2
+#define MODE_I2C_READ	4
+#define MODE_I2C_STOP	8
 
 struct i2c_algo_dp_aux_data {
 	bool running;
 	u16 address;
 	int (*aux_ch) (struct i2c_adapter *adapter,
-		       uint8_t *send, int send_bytes,
-		       uint8_t *recv, int recv_bytes);
+		       int mode, uint8_t write_byte,
+		       uint8_t *read_byte);
 };
 
 int
 i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
 
-#endif /* _INTEL_DP_H_ */
+#endif /* _DRM_DP_HELPER_H_ */
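
With the move to a shared helper, the aux_ch callback also changes contract: instead of whole send/recv buffers, it now transfers one byte per call, with the MODE_I2C_* flags selecting the start/write/read/stop phase. A hedged sketch of a conforming driver callback follows; the dp_aux_xfer() helper is a hypothetical stand-in for a driver's real AUX-channel register protocol.

/* Hypothetical, driver-provided native AUX transfer helper; a real
 * driver would program its own AUX channel registers here. */
static int dp_aux_xfer(struct i2c_adapter *adapter, u16 address,
		       uint8_t *send, int send_bytes,
		       uint8_t *recv, int recv_bytes);

static int example_aux_ch(struct i2c_adapter *adapter,
			  int mode, uint8_t write_byte,
			  uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;

	if (mode & MODE_I2C_READ)
		return dp_aux_xfer(adapter, algo_data->address,
				   NULL, 0, read_byte, 1);
	if (mode & MODE_I2C_WRITE)
		return dp_aux_xfer(adapter, algo_data->address,
				   &write_byte, 1, NULL, 0);

	/* MODE_I2C_START / MODE_I2C_STOP carry no data byte */
	return 0;
}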
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 7d6c9a2..d33c3e0 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -106,6 +106,10 @@
 	u8 wpindex2[3];
 } __attribute__((packed));
 
+struct cvt_timing {
+	u8 code[3];
+} __attribute__((packed));
+
 struct detailed_non_pixel {
 	u8 pad1;
 	u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name
@@ -117,9 +121,13 @@
 		struct detailed_data_monitor_range range;
 		struct detailed_data_wpindex color;
 		struct std_timing timings[5];
+		struct cvt_timing cvt[4];
 	} data;
 } __attribute__((packed));
 
+#define EDID_DETAIL_EST_TIMINGS 0xf7
+#define EDID_DETAIL_CVT_3BYTE 0xf8
+#define EDID_DETAIL_COLOR_MGMT_DATA 0xf9
 #define EDID_DETAIL_STD_MODES 0xfa
 #define EDID_DETAIL_MONITOR_CPDATA 0xfb
 #define EDID_DETAIL_MONITOR_NAME 0xfc
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 62329f9..4c10be3 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -66,6 +66,13 @@
 						    unsigned long size,
 						    unsigned alignment,
 						    int atomic);
+extern struct drm_mm_node *drm_mm_get_block_range_generic(
+						struct drm_mm_node *node,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long start,
+						unsigned long end,
+						int atomic);
 static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
 						   unsigned long size,
 						   unsigned alignment)
@@ -78,11 +85,38 @@
 {
 	return drm_mm_get_block_generic(parent, size, alignment, 1);
 }
+static inline struct drm_mm_node *drm_mm_get_block_range(
+						struct drm_mm_node *parent,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long start,
+						unsigned long end)
+{
+	return drm_mm_get_block_range_generic(parent, size, alignment,
+						start, end, 0);
+}
+static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
+						struct drm_mm_node *parent,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long start,
+						unsigned long end)
+{
+	return drm_mm_get_block_range_generic(parent, size, alignment,
+						start, end, 1);
+}
 extern void drm_mm_put_block(struct drm_mm_node *cur);
 extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
 					      unsigned long size,
 					      unsigned alignment,
 					      int best_match);
+extern struct drm_mm_node *drm_mm_search_free_in_range(
+						const struct drm_mm *mm,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long start,
+						unsigned long end,
+						int best_match);
 extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
 		       unsigned long size);
 extern void drm_mm_takedown(struct drm_mm *mm);
@@ -99,6 +133,7 @@
 	return block->mm;
 }
 
+extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
 #ifdef CONFIG_DEBUG_FS
 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
 #endif
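
The new _range variants confine an allocation to a [start, end) window of the managed area. A hedged sketch of the usual search-then-claim pattern, with an arbitrary example bound of 256 MiB:

/* Sketch only: claim a block inside the first 256 MiB of the managed
 * range, mirroring the search-then-get pattern of the unrestricted
 * drm_mm_search_free()/drm_mm_get_block() pair. */
static struct drm_mm_node *example_alloc_in_range(struct drm_mm *mm,
						  unsigned long size)
{
	struct drm_mm_node *node;

	node = drm_mm_search_free_in_range(mm, size, 0 /* any alignment */,
					   0, 256 * 1024 * 1024,
					   0 /* first fit */);
	if (!node)
		return NULL;

	return drm_mm_get_block_range(node, size, 0,
				      0, 256 * 1024 * 1024);
}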
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index 1f90841..43009bc 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -27,9 +27,6 @@
 #ifndef _DRM_MODE_H
 #define _DRM_MODE_H
 
-#include <linux/kernel.h>
-#include <linux/types.h>
-
 #define DRM_DISPLAY_INFO_LEN	32
 #define DRM_CONNECTOR_NAME_LEN	32
 #define DRM_DISPLAY_MODE_LEN	32
@@ -78,6 +75,11 @@
 #define DRM_MODE_DITHERING_OFF	0
 #define DRM_MODE_DITHERING_ON	1
 
+/* Dirty info options */
+#define DRM_MODE_DIRTY_OFF      0
+#define DRM_MODE_DIRTY_ON       1
+#define DRM_MODE_DIRTY_ANNOTATE 2
+
 struct drm_mode_modeinfo {
 	__u32 clock;
 	__u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
@@ -225,6 +227,45 @@
 	__u32 handle;
 };
 
+#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
+#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
+#define DRM_MODE_FB_DIRTY_FLAGS         0x03
+
+/*
+ * Mark a region of a framebuffer as dirty.
+ *
+ * Some hardware does not automatically update display contents
+ * when hardware or software draws to a framebuffer. This ioctl
+ * allows userspace to tell the kernel and the hardware what
+ * regions of the framebuffer have changed.
+ *
+ * The kernel or hardware is free to update more than just the
+ * region specified by the clip rects. The kernel or hardware
+ * may also delay and/or coalesce several calls to dirty into a
+ * single update.
+ *
+ * Userspace may annotate the updates; the annotations are a
+ * promise made by the caller that the change is either a copy
+ * of pixels or a fill of a single color in the region specified.
+ *
+ * If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given, the
+ * number of updated regions is half of num_clips, with the clip
+ * rects paired as src and dst. The width and height of each pair
+ * must match.
+ *
+ * If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given, the caller
+ * promises that the region specified by the clip rects is filled
+ * completely with the single color given in the color argument.
+ */
+
+struct drm_mode_fb_dirty_cmd {
+	__u32 fb_id;
+	__u32 flags;
+	__u32 color;
+	__u32 num_clips;
+	__u64 clips_ptr;
+};
+
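
A hedged userspace sketch of a plain (unannotated) dirty update; it assumes struct drm_clip_rect and a DRM_IOCTL_MODE_DIRTYFB request number defined alongside the other KMS ioctls in drm.h:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>	/* drm_clip_rect; DRM_IOCTL_MODE_DIRTYFB assumed here */

static int example_flush_rect(int fd, uint32_t fb_id)
{
	struct drm_clip_rect clip = { .x1 = 0, .y1 = 0, .x2 = 640, .y2 = 480 };
	struct drm_mode_fb_dirty_cmd cmd = {
		.fb_id     = fb_id,	/* framebuffer to flush */
		.flags     = 0,		/* no copy/fill annotation */
		.num_clips = 1,
		.clips_ptr = (uint64_t)(unsigned long)&clip,
	};

	return ioctl(fd, DRM_IOCTL_MODE_DIRTYFB, &cmd);
}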
 struct drm_mode_mode_cmd {
 	__u32 connector_id;
 	struct drm_mode_modeinfo mode;
@@ -268,4 +309,37 @@
 	__u64 blue;
 };
 
+#define DRM_MODE_PAGE_FLIP_EVENT 0x01
+#define DRM_MODE_PAGE_FLIP_FLAGS DRM_MODE_PAGE_FLIP_EVENT
+
+/*
+ * Request a page flip on the specified crtc.
+ *
+ * This ioctl will ask KMS to schedule a page flip for the specified
+ * crtc.  Once any pending rendering targeting the specified fb (as of
+ * ioctl time) has completed, the crtc will be reprogrammed to display
+ * that fb after the next vertical refresh.  The ioctl returns
+ * immediately, but subsequent rendering to the current fb will block
+ * in the execbuffer ioctl until the page flip happens.  If a page
+ * flip is already pending as the ioctl is called, EBUSY will be
+ * returned.
+ *
+ * The ioctl supports one flag, DRM_MODE_PAGE_FLIP_EVENT, which will
+ * request that drm sends back a vblank event (see drm.h: struct
+ * drm_event_vblank) when the page flip is done.  The user_data field
+ * passed in with this ioctl will be returned as the user_data field
+ * in the vblank event struct.
+ *
+ * The reserved field must be zero until we figure out something
+ * clever to use it for.
+ */
+
+struct drm_mode_crtc_page_flip {
+	__u32 crtc_id;
+	__u32 fb_id;
+	__u32 flags;
+	__u32 reserved;
+	__u64 user_data;
+};
+
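
A hedged userspace sketch of an event-driven flip; it assumes DRM_IOCTL_MODE_PAGE_FLIP and the struct drm_event_vblank layout described in drm.h:

#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/drm.h>	/* DRM_IOCTL_MODE_PAGE_FLIP, drm_event_vblank assumed */

static int example_flip_and_wait(int fd, uint32_t crtc_id, uint32_t new_fb)
{
	struct drm_mode_crtc_page_flip flip = {
		.crtc_id   = crtc_id,
		.fb_id     = new_fb,
		.flags     = DRM_MODE_PAGE_FLIP_EVENT,
		.user_data = 0x1234,	/* echoed back in the vblank event */
	};
	char buf[1024];

	if (ioctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip) < 0)
		return -1;	/* EBUSY if a flip is already pending */

	/* blocks until the vblank event is delivered on the DRM fd */
	return read(fd, buf, sizeof(buf)) > 0 ? 0 : -1;
}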
 #endif
diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h
index 26641e9..3933691 100644
--- a/include/drm/drm_os_linux.h
+++ b/include/drm/drm_os_linux.h
@@ -123,5 +123,5 @@
 	remove_wait_queue(&(queue), &entry);			\
 } while (0)
 
-#define DRM_WAKEUP( queue ) wake_up_interruptible( queue )
+#define DRM_WAKEUP( queue ) wake_up( queue )
 #define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
diff --git a/include/drm/i2c/ch7006.h b/include/drm/i2c/ch7006.h
new file mode 100644
index 0000000..8390b43
--- /dev/null
+++ b/include/drm/i2c/ch7006.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __DRM_I2C_CH7006_H__
+#define __DRM_I2C_CH7006_H__
+
+/**
+ * struct ch7006_encoder_params
+ *
+ * Describes how the ch7006 is wired up with the GPU. It should be
+ * used as the @params parameter of its @set_config method.
+ *
+ * See "http://www.chrontel.com/pdf/7006.pdf" for their precise
+ * meaning.
+ */
+struct ch7006_encoder_params {
+	enum {
+		CH7006_FORMAT_RGB16 = 0,
+		CH7006_FORMAT_YCrCb24m16,
+		CH7006_FORMAT_RGB24m16,
+		CH7006_FORMAT_RGB15,
+		CH7006_FORMAT_RGB24m12C,
+		CH7006_FORMAT_RGB24m12I,
+		CH7006_FORMAT_RGB24m8,
+		CH7006_FORMAT_RGB16m8,
+		CH7006_FORMAT_RGB15m8,
+		CH7006_FORMAT_YCrCb24m8,
+	} input_format;
+
+	enum {
+		CH7006_CLOCK_SLAVE = 0,
+		CH7006_CLOCK_MASTER,
+	} clock_mode;
+
+	enum {
+		CH7006_CLOCK_EDGE_NEG = 0,
+		CH7006_CLOCK_EDGE_POS,
+	} clock_edge;
+
+	int xcm, pcm;
+
+	enum {
+		CH7006_SYNC_SLAVE = 0,
+		CH7006_SYNC_MASTER,
+	} sync_direction;
+
+	enum {
+		CH7006_SYNC_SEPARATED = 0,
+		CH7006_SYNC_EMBEDDED,
+	} sync_encoding;
+
+	enum {
+		CH7006_POUT_1_8V = 0,
+		CH7006_POUT_3_3V,
+	} pout_level;
+
+	enum {
+		CH7006_ACTIVE_HSYNC = 0,
+		CH7006_ACTIVE_DSTART,
+	} active_detect;
+};
+
+#endif
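
A hedged sketch of how a board might describe its wiring with this struct; every field value below is an arbitrary example, intended to be handed to the encoder's set_config hook:

static const struct ch7006_encoder_params example_params = {
	.input_format	= CH7006_FORMAT_RGB16,
	.clock_mode	= CH7006_CLOCK_SLAVE,
	.clock_edge	= CH7006_CLOCK_EDGE_NEG,
	.xcm		= 0,			/* board-specific (example) */
	.pcm		= 0,			/* board-specific (example) */
	.sync_direction	= CH7006_SYNC_SLAVE,
	.sync_encoding	= CH7006_SYNC_SEPARATED,
	.pout_level	= CH7006_POUT_3_3V,
	.active_detect	= CH7006_ACTIVE_HSYNC,
};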
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 7e0cb1d..ec3f5e80 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -27,11 +27,11 @@
 #ifndef _I915_DRM_H_
 #define _I915_DRM_H_
 
+#include "drm.h"
+
 /* Please note that modifications to all structs defined here are
  * subject to backwards-compatibility constraints.
  */
-#include <linux/types.h>
-#include "drm.h"
 
 /* Each region is a minimum of 16k, and there are at most 255 of them.
  */
@@ -186,6 +186,8 @@
 #define DRM_I915_GEM_MMAP_GTT	0x24
 #define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
 #define DRM_I915_GEM_MADVISE	0x26
+#define DRM_I915_OVERLAY_PUT_IMAGE	0x27
+#define DRM_I915_OVERLAY_ATTRS	0x28
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -221,8 +223,10 @@
 #define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
 #define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
 #define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
-#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_intel_get_pipe_from_crtc_id)
+#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
 #define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
+#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
+#define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
 
 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -266,6 +270,8 @@
 #define I915_PARAM_CHIPSET_ID            4
 #define I915_PARAM_HAS_GEM               5
 #define I915_PARAM_NUM_FENCES_AVAIL      6
+#define I915_PARAM_HAS_OVERLAY           7
+#define I915_PARAM_HAS_PAGEFLIPPING	 8
 
 typedef struct drm_i915_getparam {
 	int param;
@@ -686,4 +692,70 @@
 	__u32 retained;
 };
 
+/* flags */
+#define I915_OVERLAY_TYPE_MASK 		0xff
+#define I915_OVERLAY_YUV_PLANAR 	0x01
+#define I915_OVERLAY_YUV_PACKED 	0x02
+#define I915_OVERLAY_RGB		0x03
+
+#define I915_OVERLAY_DEPTH_MASK		0xff00
+#define I915_OVERLAY_RGB24		0x1000
+#define I915_OVERLAY_RGB16		0x2000
+#define I915_OVERLAY_RGB15		0x3000
+#define I915_OVERLAY_YUV422		0x0100
+#define I915_OVERLAY_YUV411		0x0200
+#define I915_OVERLAY_YUV420		0x0300
+#define I915_OVERLAY_YUV410		0x0400
+
+#define I915_OVERLAY_SWAP_MASK		0xff0000
+#define I915_OVERLAY_NO_SWAP		0x000000
+#define I915_OVERLAY_UV_SWAP		0x010000
+#define I915_OVERLAY_Y_SWAP		0x020000
+#define I915_OVERLAY_Y_AND_UV_SWAP	0x030000
+
+#define I915_OVERLAY_FLAGS_MASK		0xff000000
+#define I915_OVERLAY_ENABLE		0x01000000
+
+struct drm_intel_overlay_put_image {
+	/* various flags and src format description */
+	__u32 flags;
+	/* source picture description */
+	__u32 bo_handle;
+	/* stride values and offsets are in bytes, buffer relative */
+	__u16 stride_Y; /* stride for packed formats */
+	__u16 stride_UV;
+	__u32 offset_Y; /* offset for packed formats */
+	__u32 offset_U;
+	__u32 offset_V;
+	/* in pixels */
+	__u16 src_width;
+	__u16 src_height;
+	/* to compensate for the scaling factors of partially covered surfaces */
+	__u16 src_scan_width;
+	__u16 src_scan_height;
+	/* output crtc description */
+	__u32 crtc_id;
+	__u16 dst_x;
+	__u16 dst_y;
+	__u16 dst_width;
+	__u16 dst_height;
+};
+
+/* flags */
+#define I915_OVERLAY_UPDATE_ATTRS	(1<<0)
+#define I915_OVERLAY_UPDATE_GAMMA	(1<<1)
+struct drm_intel_overlay_attrs {
+	__u32 flags;
+	__u32 color_key;
+	__s32 brightness;
+	__u32 contrast;
+	__u32 saturation;
+	__u32 gamma0;
+	__u32 gamma1;
+	__u32 gamma2;
+	__u32 gamma3;
+	__u32 gamma4;
+	__u32 gamma5;
+};
+
 #endif				/* _I915_DRM_H_ */
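
A hedged userspace sketch of a packed-YUV422 overlay update using the structures above; the file descriptor, GEM handle and geometry are example values:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* assumed userspace install path */

static int example_show_overlay(int fd, uint32_t bo_handle, uint32_t crtc_id)
{
	struct drm_intel_overlay_put_image img = {
		.flags = I915_OVERLAY_ENABLE | I915_OVERLAY_YUV_PACKED |
			 I915_OVERLAY_YUV422,
		.bo_handle = bo_handle,		/* GEM handle of the source */
		.stride_Y = 1280 * 2,		/* packed YUV422: 2 bytes/pixel */
		.offset_Y = 0,
		.src_width = 1280,
		.src_height = 720,
		.src_scan_width = 1280,		/* fully covered surface */
		.src_scan_height = 720,
		.crtc_id = crtc_id,
		.dst_x = 0,
		.dst_y = 0,
		.dst_width = 1280,
		.dst_height = 720,
	};

	return ioctl(fd, DRM_IOCTL_I915_OVERLAY_PUT_IMAGE, &img);
}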
diff --git a/include/drm/mga_drm.h b/include/drm/mga_drm.h
index 325fd6f..3ffbc47 100644
--- a/include/drm/mga_drm.h
+++ b/include/drm/mga_drm.h
@@ -35,7 +35,7 @@
 #ifndef __MGA_DRM_H__
 #define __MGA_DRM_H__
 
-#include <linux/types.h>
+#include "drm.h"
 
 /* WARNING: If you change any of these defines, make sure to change the
  * defines in the Xserver file (mga_sarea.h)
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
new file mode 100644
index 0000000..1e67c44
--- /dev/null
+++ b/include/drm/nouveau_drm.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2005 Stephane Marchesin.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_DRM_H__
+#define __NOUVEAU_DRM_H__
+
+#define NOUVEAU_DRM_HEADER_PATCHLEVEL 15
+
+struct drm_nouveau_channel_alloc {
+	uint32_t     fb_ctxdma_handle;
+	uint32_t     tt_ctxdma_handle;
+
+	int          channel;
+
+	/* Notifier memory */
+	uint32_t     notifier_handle;
+
+	/* DRM-enforced subchannel assignments */
+	struct {
+		uint32_t handle;
+		uint32_t grclass;
+	} subchan[8];
+	uint32_t nr_subchan;
+};
+
+struct drm_nouveau_channel_free {
+	int channel;
+};
+
+struct drm_nouveau_grobj_alloc {
+	int      channel;
+	uint32_t handle;
+	int      class;
+};
+
+struct drm_nouveau_notifierobj_alloc {
+	uint32_t channel;
+	uint32_t handle;
+	uint32_t size;
+	uint32_t offset;
+};
+
+struct drm_nouveau_gpuobj_free {
+	int      channel;
+	uint32_t handle;
+};
+
+/* FIXME : maybe unify {GET,SET}PARAMs */
+#define NOUVEAU_GETPARAM_PCI_VENDOR      3
+#define NOUVEAU_GETPARAM_PCI_DEVICE      4
+#define NOUVEAU_GETPARAM_BUS_TYPE        5
+#define NOUVEAU_GETPARAM_FB_PHYSICAL     6
+#define NOUVEAU_GETPARAM_AGP_PHYSICAL    7
+#define NOUVEAU_GETPARAM_FB_SIZE         8
+#define NOUVEAU_GETPARAM_AGP_SIZE        9
+#define NOUVEAU_GETPARAM_PCI_PHYSICAL    10
+#define NOUVEAU_GETPARAM_CHIPSET_ID      11
+#define NOUVEAU_GETPARAM_VM_VRAM_BASE    12
+struct drm_nouveau_getparam {
+	uint64_t param;
+	uint64_t value;
+};
+
+struct drm_nouveau_setparam {
+	uint64_t param;
+	uint64_t value;
+};
+
+#define NOUVEAU_GEM_DOMAIN_CPU       (1 << 0)
+#define NOUVEAU_GEM_DOMAIN_VRAM      (1 << 1)
+#define NOUVEAU_GEM_DOMAIN_GART      (1 << 2)
+#define NOUVEAU_GEM_DOMAIN_MAPPABLE  (1 << 3)
+
+struct drm_nouveau_gem_info {
+	uint32_t handle;
+	uint32_t domain;
+	uint64_t size;
+	uint64_t offset;
+	uint64_t map_handle;
+	uint32_t tile_mode;
+	uint32_t tile_flags;
+};
+
+struct drm_nouveau_gem_new {
+	struct drm_nouveau_gem_info info;
+	uint32_t channel_hint;
+	uint32_t align;
+};
+
+struct drm_nouveau_gem_pushbuf_bo {
+	uint64_t user_priv;
+	uint32_t handle;
+	uint32_t read_domains;
+	uint32_t write_domains;
+	uint32_t valid_domains;
+	uint32_t presumed_ok;
+	uint32_t presumed_domain;
+	uint64_t presumed_offset;
+};
+
+#define NOUVEAU_GEM_RELOC_LOW  (1 << 0)
+#define NOUVEAU_GEM_RELOC_HIGH (1 << 1)
+#define NOUVEAU_GEM_RELOC_OR   (1 << 2)
+struct drm_nouveau_gem_pushbuf_reloc {
+	uint32_t bo_index;
+	uint32_t reloc_index;
+	uint32_t flags;
+	uint32_t data;
+	uint32_t vor;
+	uint32_t tor;
+};
+
+#define NOUVEAU_GEM_MAX_BUFFERS 1024
+#define NOUVEAU_GEM_MAX_RELOCS 1024
+
+struct drm_nouveau_gem_pushbuf {
+	uint32_t channel;
+	uint32_t nr_dwords;
+	uint32_t nr_buffers;
+	uint32_t nr_relocs;
+	uint64_t dwords;
+	uint64_t buffers;
+	uint64_t relocs;
+};
+
+struct drm_nouveau_gem_pushbuf_call {
+	uint32_t channel;
+	uint32_t handle;
+	uint32_t offset;
+	uint32_t nr_buffers;
+	uint32_t nr_relocs;
+	uint32_t nr_dwords;
+	uint64_t buffers;
+	uint64_t relocs;
+	uint32_t suffix0;
+	uint32_t suffix1;
+	/* below only accessed for CALL2 */
+	uint64_t vram_available;
+	uint64_t gart_available;
+};
+
+struct drm_nouveau_gem_pin {
+	uint32_t handle;
+	uint32_t domain;
+	uint64_t offset;
+};
+
+struct drm_nouveau_gem_unpin {
+	uint32_t handle;
+};
+
+#define NOUVEAU_GEM_CPU_PREP_NOWAIT                                  0x00000001
+#define NOUVEAU_GEM_CPU_PREP_NOBLOCK                                 0x00000002
+#define NOUVEAU_GEM_CPU_PREP_WRITE                                   0x00000004
+struct drm_nouveau_gem_cpu_prep {
+	uint32_t handle;
+	uint32_t flags;
+};
+
+struct drm_nouveau_gem_cpu_fini {
+	uint32_t handle;
+};
+
+struct drm_nouveau_gem_tile {
+	uint32_t handle;
+	uint32_t offset;
+	uint32_t size;
+	uint32_t tile_mode;
+	uint32_t tile_flags;
+};
+
+enum nouveau_bus_type {
+	NV_AGP     = 0,
+	NV_PCI     = 1,
+	NV_PCIE    = 2,
+};
+
+struct drm_nouveau_sarea {
+};
+
+#define DRM_NOUVEAU_CARD_INIT          0x00
+#define DRM_NOUVEAU_GETPARAM           0x01
+#define DRM_NOUVEAU_SETPARAM           0x02
+#define DRM_NOUVEAU_CHANNEL_ALLOC      0x03
+#define DRM_NOUVEAU_CHANNEL_FREE       0x04
+#define DRM_NOUVEAU_GROBJ_ALLOC        0x05
+#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC  0x06
+#define DRM_NOUVEAU_GPUOBJ_FREE        0x07
+#define DRM_NOUVEAU_GEM_NEW            0x40
+#define DRM_NOUVEAU_GEM_PUSHBUF        0x41
+#define DRM_NOUVEAU_GEM_PUSHBUF_CALL   0x42
+#define DRM_NOUVEAU_GEM_PIN            0x43 /* !KMS only */
+#define DRM_NOUVEAU_GEM_UNPIN          0x44 /* !KMS only */
+#define DRM_NOUVEAU_GEM_CPU_PREP       0x45
+#define DRM_NOUVEAU_GEM_CPU_FINI       0x46
+#define DRM_NOUVEAU_GEM_INFO           0x47
+#define DRM_NOUVEAU_GEM_PUSHBUF_CALL2  0x48
+
+#endif /* __NOUVEAU_DRM_H__ */
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index 3b9932a..39537f3 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -33,7 +33,7 @@
 #ifndef __RADEON_DRM_H__
 #define __RADEON_DRM_H__
 
-#include <linux/types.h>
+#include "drm.h"
 
 /* WARNING: If you change any of these defines, make sure to change the
  * defines in the X server file (radeon_sarea.h)
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 4911461..81eb9f4 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -44,6 +44,29 @@
 
 struct drm_mm_node;
 
+
+/**
+ * struct ttm_placement
+ *
+ * @fpfn:		first valid page frame number to put the object
+ * @lpfn:		last valid page frame number to put the object
+ * @num_placement:	number of preferred placements
+ * @placement:		preferred placements
+ * @num_busy_placement:	number of preferred placements to use when the
+ *			buffer must be evicted
+ * @busy_placement:	preferred placements to use when the buffer must
+ *			be evicted
+ *
+ * Structure indicating the placement you request for an object.
+ */
+struct ttm_placement {
+	unsigned	fpfn;
+	unsigned	lpfn;
+	unsigned	num_placement;
+	const uint32_t	*placement;
+	unsigned	num_busy_placement;
+	const uint32_t	*busy_placement;
+};
+
+
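
A hedged driver-side sketch of a placement that prefers VRAM but falls back to system memory under eviction pressure; the TTM_PL_FLAG_* values are assumed to come from ttm_placement.h:

static const uint32_t example_flags[] = {
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,		/* first choice */
	TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,	/* fallback */
};

static const struct ttm_placement example_placement = {
	.fpfn = 0,			/* no page-frame restriction */
	.lpfn = 0,
	.num_placement = 2,
	.placement = example_flags,
	.num_busy_placement = 1,	/* under pressure: system memory only */
	.busy_placement = &example_flags[1],
};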
 /**
  * struct ttm_mem_reg
  *
@@ -109,10 +132,6 @@
  * the object is destroyed.
  * @event_queue: Queue for processes waiting on buffer object status change.
  * @lock: spinlock protecting mostly synchronization members.
- * @proposed_placement: Proposed placement for the buffer. Changed only by the
- * creator prior to validation as opposed to bo->mem.proposed_flags which is
- * changed by the implementation prior to a buffer move if it wants to outsmart
- * the buffer creator / user. This latter happens, for example, at eviction.
  * @mem: structure describing current placement.
  * @persistant_swap_storage: Usually the swap storage is deleted for buffers
  * pinned in physical memory. If this behaviour is not desired, this member
@@ -177,7 +196,6 @@
 	 * Members protected by the bo::reserved lock.
 	 */
 
-	uint32_t proposed_placement;
 	struct ttm_mem_reg mem;
 	struct file *persistant_swap_storage;
 	struct ttm_tt *ttm;
@@ -285,29 +303,30 @@
  * Note: It might be necessary to block validations before the
  * wait by reserving the buffer.
  * Returns -EBUSY if no_wait is true and the buffer is busy.
- * Returns -ERESTART if interrupted by a signal.
+ * Returns -ERESTARTSYS if interrupted by a signal.
  */
 extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
 		       bool interruptible, bool no_wait);
 /**
- * ttm_buffer_object_validate
+ * ttm_bo_validate
  *
  * @bo: The buffer object.
- * @proposed_placement: Proposed_placement for the buffer object.
+ * @placement: Proposed placement for the buffer object.
  * @interruptible: Sleep interruptible if sleeping.
  * @no_wait: Return immediately if the buffer is busy.
  *
  * Changes placement and caching policy of the buffer object
- * according to bo::proposed_flags.
+ * according to the proposed placement.
  * Returns
- * -EINVAL on invalid proposed_flags.
+ * -EINVAL on invalid proposed placement.
  * -ENOMEM on out-of-memory condition.
  * -EBUSY if no_wait is true and buffer busy.
- * -ERESTART if interrupted by a signal.
+ * -ERESTARTSYS if interrupted by a signal.
  */
-extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
-				      uint32_t proposed_placement,
-				      bool interruptible, bool no_wait);
+extern int ttm_bo_validate(struct ttm_buffer_object *bo,
+				struct ttm_placement *placement,
+				bool interruptible, bool no_wait);
+
 /**
  * ttm_bo_unref
  *
@@ -328,7 +347,7 @@
  * waiting for buffer idle. This lock is recursive.
  * Returns
  * -EBUSY if the buffer is busy and no_wait is true.
- * -ERESTART if interrupted by a signal.
+ * -ERESTARTSYS if interrupted by a signal.
  */
 
 extern int
@@ -343,7 +362,7 @@
 extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
 
 /**
- * ttm_buffer_object_init
+ * ttm_bo_init
  *
  * @bdev: Pointer to a ttm_bo_device struct.
  * @bo: Pointer to a ttm_buffer_object to be initialized.
@@ -371,20 +390,20 @@
  * Returns
  * -ENOMEM: Out of memory.
  * -EINVAL: Invalid placement flags.
- * -ERESTART: Interrupted by signal while sleeping waiting for resources.
+ * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
  */
 
-extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
-				  struct ttm_buffer_object *bo,
-				  unsigned long size,
-				  enum ttm_bo_type type,
-				  uint32_t flags,
-				  uint32_t page_alignment,
-				  unsigned long buffer_start,
-				  bool interrubtible,
-				  struct file *persistant_swap_storage,
-				  size_t acc_size,
-				  void (*destroy) (struct ttm_buffer_object *));
+extern int ttm_bo_init(struct ttm_bo_device *bdev,
+			struct ttm_buffer_object *bo,
+			unsigned long size,
+			enum ttm_bo_type type,
+			struct ttm_placement *placement,
+			uint32_t page_alignment,
+			unsigned long buffer_start,
+			bool interruptible,
+			struct file *persistant_swap_storage,
+			size_t acc_size,
+			void (*destroy) (struct ttm_buffer_object *));
 /**
  * ttm_bo_synccpu_object_init
  *
@@ -405,47 +424,43 @@
  * GEM user interface.
  * @p_bo: On successful completion *p_bo points to the created object.
  *
- * This function allocates a ttm_buffer_object, and then calls
- * ttm_buffer_object_init on that object.
- * The destroy function is set to kfree().
+ * This function allocates a ttm_buffer_object, and then calls ttm_bo_init
+ * on that object. The destroy function is set to kfree().
  * Returns
  * -ENOMEM: Out of memory.
  * -EINVAL: Invalid placement flags.
- * -ERESTART: Interrupted by signal while waiting for resources.
+ * -ERESTARTSYS: Interrupted by signal while waiting for resources.
  */
 
-extern int ttm_buffer_object_create(struct ttm_bo_device *bdev,
-				    unsigned long size,
-				    enum ttm_bo_type type,
-				    uint32_t flags,
-				    uint32_t page_alignment,
-				    unsigned long buffer_start,
-				    bool interruptible,
-				    struct file *persistant_swap_storage,
-				    struct ttm_buffer_object **p_bo);
+extern int ttm_bo_create(struct ttm_bo_device *bdev,
+				unsigned long size,
+				enum ttm_bo_type type,
+				struct ttm_placement *placement,
+				uint32_t page_alignment,
+				unsigned long buffer_start,
+				bool interruptible,
+				struct file *persistant_swap_storage,
+				struct ttm_buffer_object **p_bo);
 
 /**
  * ttm_bo_check_placement
  *
- * @bo: the buffer object.
- * @set_flags: placement flags to set.
- * @clr_flags: placement flags to clear.
+ * @bo:		the buffer object.
+ * @placement:	placements
  *
  * Performs minimal validity checking on an intended change of
  * placement flags.
  * Returns
  * -EINVAL: Intended change is invalid or not allowed.
  */
-
 extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
-				  uint32_t set_flags, uint32_t clr_flags);
+					struct ttm_placement *placement);
 
 /**
  * ttm_bo_init_mm
  *
  * @bdev: Pointer to a ttm_bo_device struct.
  * @mem_type: The memory type.
- * @p_offset: offset for managed area in pages.
  * @p_size: size managed area in pages.
  *
  * Initialize a manager for a given memory type.
@@ -458,7 +473,7 @@
  */
 
 extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
-			  unsigned long p_offset, unsigned long p_size);
+				unsigned long p_size);
 /**
  * ttm_bo_clean_mm
  *
@@ -503,7 +518,7 @@
  *
  * Returns:
  * -EINVAL: Invalid or uninitialized memory type.
- * -ERESTART: The call was interrupted by a signal while waiting to
+ * -ERESTARTSYS: The call was interrupted by a signal while waiting to
  * evict a buffer.
  */
 
@@ -606,7 +621,7 @@
  * be called from the fops::read and fops::write method.
  * Returns:
  * See man (2) write, man(2) read. In particular,
- * the function may return -EINTR if
+ * the function may return -ERESTARTSYS if
  * interrupted by a signal.
  */
 
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e8cd6d2..ff7664e 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -242,12 +242,6 @@
 /**
  * struct ttm_bo_driver
  *
- * @mem_type_prio: Priority array of memory types to place a buffer object in
- * if it fits without evicting buffers from any of these memory types.
- * @mem_busy_prio: Priority array of memory types to place a buffer object in
- * if it needs to evict buffers to make room.
- * @num_mem_type_prio: Number of elements in the @mem_type_prio array.
- * @num_mem_busy_prio: Number of elements in the @num_mem_busy_prio array.
  * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
  * @invalidate_caches: Callback to invalidate read caches when a buffer object
  * has been evicted.
@@ -265,11 +259,6 @@
  */
 
 struct ttm_bo_driver {
-	const uint32_t *mem_type_prio;
-	const uint32_t *mem_busy_prio;
-	uint32_t num_mem_type_prio;
-	uint32_t num_mem_busy_prio;
-
 	/**
 	 * struct ttm_bo_driver member create_ttm_backend_entry
 	 *
@@ -306,7 +295,8 @@
 	 * finished, they'll end up in bo->mem.flags
 	 */
 
-	 uint32_t(*evict_flags) (struct ttm_buffer_object *bo);
+	 void(*evict_flags) (struct ttm_buffer_object *bo,
+				struct ttm_placement *placement);
 	/**
 	 * struct ttm_bo_driver member move:
 	 *
@@ -545,6 +535,15 @@
 extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
 
 /**
+ * ttm_tt_populate:
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Add backing pages to all of @ttm
+ */
+extern int ttm_tt_populate(struct ttm_tt *ttm);
+
+/**
  * ttm_ttm_destroy:
  *
  * @ttm: The struct ttm_tt.
@@ -639,12 +638,12 @@
  * -EBUSY: No space available (only if no_wait == 1).
  * -ENOMEM: Could not allocate memory for the buffer object, either due to
  * fragmentation or concurrent allocators.
- * -ERESTART: An interruptible sleep was interrupted by a signal.
+ * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
  */
 extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
-			    uint32_t proposed_placement,
-			    struct ttm_mem_reg *mem,
-			    bool interruptible, bool no_wait);
+				struct ttm_placement *placement,
+				struct ttm_mem_reg *mem,
+				bool interruptible, bool no_wait);
 /**
  * ttm_bo_wait_for_cpu
  *
@@ -654,7 +653,7 @@
  * Wait until a buffer object is no longer sync'ed for CPU access.
  * Returns:
  * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
- * -ERESTART: An interruptible sleep was interrupted by a signal.
+ * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
  */
 
 extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
@@ -758,7 +757,7 @@
  * -EAGAIN: The reservation may cause a deadlock.
  * Release all buffer reservations, wait for @bo to become unreserved and
  * try again. (only if use_sequence == 1).
- * -ERESTART: A wait for the buffer to become unreserved was interrupted by
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
  * a signal. Release all buffer reservations and return to user-space.
  */
 extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
@@ -799,7 +798,7 @@
  *
  * Returns:
  * -EBUSY: If no_wait == 1 and the buffer is already reserved.
- * -ERESTART: If interruptible == 1 and the process received a signal
+ * -ERESTARTSYS: If interruptible == 1 and the process received a signal
  * while sleeping.
  */
 extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
new file mode 100644
index 0000000..cd2c475
--- /dev/null
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -0,0 +1,107 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#ifndef _TTM_EXECBUF_UTIL_H_
+#define _TTM_EXECBUF_UTIL_H_
+
+#include "ttm/ttm_bo_api.h"
+#include <linux/list.h>
+
+/**
+ * struct ttm_validate_buffer
+ *
+ * @head:           list head for thread-private list.
+ * @bo:             refcounted buffer object pointer.
+ * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
+ * adding a new sync object.
+ * @reserved:       Indicates whether @bo has been reserved for validation.
+ */
+
+struct ttm_validate_buffer {
+	struct list_head head;
+	struct ttm_buffer_object *bo;
+	void *new_sync_obj_arg;
+	bool reserved;
+};
+
+/**
+ * function ttm_eu_backoff_reservation
+ *
+ * @list:     thread private list of ttm_validate_buffer structs.
+ *
+ * Undoes all buffer validation reservations for bos pointed to by
+ * the list entries.
+ */
+
+extern void ttm_eu_backoff_reservation(struct list_head *list);
+
+/**
+ * function ttm_eu_reserve_buffers
+ *
+ * @list:    thread private list of ttm_validate_buffer structs.
+ * @val_seq: A unique sequence number.
+ *
+ * Tries to reserve bos pointed to by the list entries for validation.
+ * If the function returns 0, all buffers are marked as "unfenced",
+ * taken off the lru lists and are not synced for write CPU usage.
+ *
+ * If the function detects a deadlock due to multiple threads trying to
+ * reserve the same buffers in reverse order, all threads except one will
+ * back off and retry. This function may sleep while waiting for
+ * CPU write reservations to be cleared, and for other threads to
+ * unreserve their buffers.
+ *
+ * This function may return -ERESTARTSYS or -EAGAIN if the calling process
+ * receives a signal while waiting. In that case, no buffers on the list
+ * will be reserved upon return.
+ *
+ * Buffers reserved by this function should be unreserved by
+ * a call to either ttm_eu_backoff_reservation() or
+ * ttm_eu_fence_buffer_objects() when command submission is complete or
+ * has failed.
+ */
+
+extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq);
+
+/**
+ * function ttm_eu_fence_buffer_objects.
+ *
+ * @list:        thread private list of ttm_validate_buffer structs.
+ * @sync_obj:    The new sync object for the buffers.
+ *
+ * This function should be called when command submission is complete, and
+ * it will add a new sync object to bos pointed to by entries on @list.
+ * It also unreserves all buffers, putting them on lru lists.
+ *
+ */
+
+extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);
+
+#endif
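
A hedged sketch of the submission flow these helpers are designed around; the per-buffer validation and the sync object are driver-specific and only indicated schematically:

/* "list" holds struct ttm_validate_buffer entries built by the driver. */
static int example_submit(struct list_head *list, uint32_t val_seq,
			  void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	int ret;

	ret = ttm_eu_reserve_buffers(list, val_seq);
	if (ret)
		return ret;	/* nothing on the list is left reserved */

	list_for_each_entry(entry, list, head) {
		/* driver-specific: validate entry->bo into its required
		 * placement here, e.g. via ttm_bo_validate() */
	}

	/* ... build and fire the command stream ... */

	/* attach the new sync object and unreserve everything */
	ttm_eu_fence_buffer_objects(list, sync_obj);
	return 0;
}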
diff --git a/include/drm/ttm/ttm_lock.h b/include/drm/ttm/ttm_lock.h
new file mode 100644
index 0000000..81ba0b0
--- /dev/null
+++ b/include/drm/ttm/ttm_lock.h
@@ -0,0 +1,247 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+/** @file ttm_lock.h
+ * This file implements a simple replacement for the buffer manager use
+ * of the DRM heavyweight hardware lock.
+ * The lock is a read-write lock. Taking it in read mode and write mode
+ * is relatively fast, and intended for in-kernel use only.
+ *
+ * The vt mode is used only when there is a need to block all
+ * user-space processes from validating buffers.
+ * It's allowed to leave kernel space with the vt lock held.
+ * If a user-space process dies while having the vt-lock,
+ * it will be released during the file descriptor release. The vt lock
+ * excludes write lock and read lock.
+ *
+ * The suspend mode is used to lock out all TTM users when preparing for
+ * and executing suspend operations.
+ *
+ */
+
+#ifndef _TTM_LOCK_H_
+#define _TTM_LOCK_H_
+
+#include "ttm/ttm_object.h"
+#include <linux/wait.h>
+#include <asm/atomic.h>
+
+/**
+ * struct ttm_lock
+ *
+ * @base: ttm base object used solely to release the lock if the client
+ * holding the lock dies.
+ * @queue: Queue for processes waiting for lock change-of-status.
+ * @lock: Spinlock protecting some lock members.
+ * @rw: Read-write lock counter. Protected by @lock.
+ * @flags: Lock state. Protected by @lock.
+ * @kill_takers: Boolean whether to kill takers of the lock.
+ * @signal: Signal to send when kill_takers is true.
+ */
+
+struct ttm_lock {
+	struct ttm_base_object base;
+	wait_queue_head_t queue;
+	spinlock_t lock;
+	int32_t rw;
+	uint32_t flags;
+	bool kill_takers;
+	int signal;
+	struct ttm_object_file *vt_holder;
+};
+
+
+/**
+ * ttm_lock_init
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * Initializes the lock.
+ */
+extern void ttm_lock_init(struct ttm_lock *lock);
+
+/**
+ * ttm_read_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a read lock.
+ */
+extern void ttm_read_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_read_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Takes the lock in read mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_read_trylock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Tries to take the lock in read mode. If the lock is already held
+ * in write mode, the function will return -EBUSY. If the lock is held
+ * in vt or suspend mode, the function will sleep until these modes
+ * are unlocked.
+ *
+ * Returns:
+ * -EBUSY The lock was already held in write mode.
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_write_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a write lock.
+ */
+extern void ttm_write_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_write_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Takes the lock in write mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_lock_downgrade
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Downgrades a write lock to a read lock.
+ */
+extern void ttm_lock_downgrade(struct ttm_lock *lock);
+
+/**
+ * ttm_suspend_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Takes the lock in suspend mode. Excludes read and write mode.
+ */
+extern void ttm_suspend_lock(struct ttm_lock *lock);
+
+/**
+ * ttm_suspend_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a suspend lock
+ */
+extern void ttm_suspend_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_vt_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ * @tfile: Pointer to a struct ttm_object_file to register the lock with.
+ *
+ * Takes the lock in vt mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ * -ENOMEM: Out of memory when locking.
+ */
+extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible,
+		       struct ttm_object_file *tfile);
+
+/**
+ * ttm_vt_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a vt lock.
+ * Returns:
+ * -EINVAL If the lock was not held.
+ */
+extern int ttm_vt_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_lock_set_kill
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @val: Boolean whether to kill processes taking the lock.
+ * @signal: Signal to send to the process taking the lock.
+ *
+ * The kill-when-taking-lock functionality is used to kill processes that keep
+ * on using the TTM functionality when its resources have been taken down, for
+ * example when the X server exits. A typical sequence would look like this:
+ * - X server takes lock in write mode.
+ * - ttm_lock_set_kill() is called with @val set to true.
+ * - As part of X server exit, TTM resources are taken down.
+ * - X server releases the lock on file release.
+ * - Another dri client wants to render, takes the lock and is killed.
+ *
+ */
+static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val,
+				     int signal)
+{
+	lock->kill_takers = val;
+	if (val)
+		lock->signal = signal;
+}
+
+#endif
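
A hedged sketch of the intended pattern in a driver ioctl: validation paths take the lock in read mode, so a vt- or suspend-mode holder can fence them all out:

static int example_validate_ioctl(struct ttm_lock *lock)
{
	int ret;

	ret = ttm_read_lock(lock, true /* interruptible */);
	if (ret)
		return ret;	/* -ERESTARTSYS, or killed via ttm_lock_set_kill */

	/* ... validate buffers and submit rendering here ... */

	ttm_read_unlock(lock);
	return 0;
}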
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 6983a7c..b199170 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -33,6 +33,7 @@
 #include <linux/wait.h>
 #include <linux/errno.h>
 #include <linux/kobject.h>
+#include <linux/mm.h>
 
 /**
  * struct ttm_mem_shrink - callback to shrink TTM memory usage.
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
new file mode 100644
index 0000000..703ca4d
--- /dev/null
+++ b/include/drm/ttm/ttm_object.h
@@ -0,0 +1,267 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/** @file ttm_object.h
+ *
+ * Base- and reference object implementation for the various
+ * ttm objects. Implements reference counting, minimal security checks
+ * and release on file close.
+ */
+
+#ifndef _TTM_OBJECT_H_
+#define _TTM_OBJECT_H_
+
+#include <linux/list.h>
+#include "drm_hashtab.h"
+#include <linux/kref.h>
+#include <ttm/ttm_memory.h>
+
+/**
+ * enum ttm_ref_type
+ *
+ * Describes what type of reference a ref object holds.
+ *
+ * TTM_REF_USAGE is a simple refcount on a base object.
+ *
+ * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
+ * buffer object.
+ *
+ * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
+ * buffer object.
+ *
+ */
+
+enum ttm_ref_type {
+	TTM_REF_USAGE,
+	TTM_REF_SYNCCPU_READ,
+	TTM_REF_SYNCCPU_WRITE,
+	TTM_REF_NUM
+};
+
+/**
+ * enum ttm_object_type
+ *
+ * One entry per ttm object type.
+ * Device-specific types should use the
+ * ttm_driver_type<n> types (ttm_driver_type0, ttm_driver_type1, ...).
+ */
+
+enum ttm_object_type {
+	ttm_fence_type,
+	ttm_buffer_type,
+	ttm_lock_type,
+	ttm_driver_type0 = 256,
+	ttm_driver_type1
+};
+
+struct ttm_object_file;
+struct ttm_object_device;
+
+/**
+ * struct ttm_base_object
+ *
+ * @hash: hash entry for the per-device object hash.
+ * @type: derived type this object is base class for.
+ * @shareable: Other ttm_object_files can access this object.
+ *
+ * @tfile: Pointer to ttm_object_file of the creator.
+ * NULL if the object was not created by a user request.
+ * (kernel object).
+ *
+ * @refcount: Number of references to this object, not
+ * including the hash entry. A reference to a base object can
+ * only be held by a ref object.
+ *
+ * @refcount_release: A function to be called when there are
+ * no more references to this object. This function should
+ * destroy the object (or make sure destruction eventually happens),
+ * and when it is called, the object has
+ * already been taken out of the per-device hash. The parameter
+ * "base" should be set to NULL by the function.
+ *
+ * @ref_obj_release: A function to be called when a reference object
+ * with another ttm_ref_type than TTM_REF_USAGE is deleted.
+ * this function may, for example, release a lock held by a user-space
+ * process.
+ *
+ * This struct is intended to be used as a base struct for objects that
+ * are visible to user-space. It provides a global name, race-safe
+ * access and refcounting, minimal access control and hooks for unref actions.
+ */
+
+struct ttm_base_object {
+	struct drm_hash_item hash;
+	enum ttm_object_type object_type;
+	bool shareable;
+	struct ttm_object_file *tfile;
+	struct kref refcount;
+	void (*refcount_release) (struct ttm_base_object **base);
+	void (*ref_obj_release) (struct ttm_base_object *base,
+				 enum ttm_ref_type ref_type);
+};
+
+/**
+ * ttm_base_object_init
+ *
+ * @tfile: Pointer to a struct ttm_object_file.
+ * @base: The struct ttm_base_object to initialize.
+ * @shareable: This object is shareable with other applications.
+ * (different @tfile pointers.)
+ * @type: The object type.
+ * @refcount_release: See the struct ttm_base_object description.
+ * @ref_obj_release: See the struct ttm_base_object description.
+ *
+ * Initializes a struct ttm_base_object.
+ */
+
+extern int ttm_base_object_init(struct ttm_object_file *tfile,
+				struct ttm_base_object *base,
+				bool shareable,
+				enum ttm_object_type type,
+				void (*refcount_release) (struct ttm_base_object
+							  **),
+				void (*ref_obj_release) (struct ttm_base_object
+							 *,
+							 enum ttm_ref_type
+							 ref_type));
+
+/**
+ * ttm_base_object_lookup
+ *
+ * @tfile: Pointer to a struct ttm_object_file.
+ * @key: Hash key
+ *
+ * Looks up a struct ttm_base_object with the key @key.
+ * Also verifies that the object is visible to the application, by
+ * comparing the @tfile argument and checking the object shareable flag.
+ */
+
+extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
+						      *tfile, uint32_t key);
+
+/**
+ * ttm_base_object_unref
+ *
+ * @p_base: Pointer to a pointer referencing a struct ttm_base_object.
+ *
+ * Decrements the base object refcount and clears the pointer pointed to by
+ * p_base.
+ */
+
+extern void ttm_base_object_unref(struct ttm_base_object **p_base);
+
+/**
+ * ttm_ref_object_add.
+ *
+ * @tfile: A struct ttm_object_file representing the application owning the
+ * ref_object.
+ * @base: The base object to reference.
+ * @ref_type: The type of reference.
+ * @existed: Upon completion, indicates that an identical reference object
+ * already existed, and the refcount was upped on that object instead.
+ *
+ * Adding a ref object to a base object is basically like referencing the
+ * base object, but a user-space application holds the reference. When the
+ * file corresponding to @tfile is closed, all its reference objects are
+ * deleted. A reference object can have different types depending on what
+ * it's intended for. It can be simple refcounting to prevent object
+ * destruction; when user-space takes a lock, it can add a ref object to
+ * that lock to make sure the lock is released if the application dies. A
+ * ref object will hold a single reference on a base object.
+ */
+extern int ttm_ref_object_add(struct ttm_object_file *tfile,
+			      struct ttm_base_object *base,
+			      enum ttm_ref_type ref_type, bool *existed);
+/**
+ * ttm_ref_object_base_unref
+ *
+ * @key: Key representing the base object.
+ * @ref_type: Ref type of the ref object to be dereferenced.
+ *
+ * Unreference a ref object with type @ref_type
+ * on the base object identified by @key. If there are no duplicate
+ * references, the ref object will be destroyed and the base object
+ * will be unreferenced.
+ */
+extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
+				     unsigned long key,
+				     enum ttm_ref_type ref_type);
+
+/**
+ * ttm_object_file_init - initialize a struct ttm_object file
+ *
+ * @tdev: A struct ttm_object device this file is initialized on.
+ * @hash_order: Order of the hash table used to hold the reference objects.
+ *
+ * This is typically called by the file_ops::open function.
+ */
+
+extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
+						    *tdev,
+						    unsigned int hash_order);
+
+/**
+ * ttm_object_file_release - release data held by a ttm_object_file
+ *
+ * @p_tfile: Pointer to pointer to the ttm_object_file object to release.
+ * *p_tfile will be set to NULL by this function.
+ *
+ * Releases all data associated by a ttm_object_file.
+ * Typically called from file_ops::release. The caller must
+ * ensure that there are no concurrent users of tfile.
+ */
+
+extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
+
+/**
+ * ttm_object_device_init - initialize a struct ttm_object_device
+ *
+ * @mem_glob: Pointer to a struct ttm_mem_global for object memory accounting.
+ * @hash_order: Order of hash table used to hash the base objects.
+ *
+ * This function is typically called on device initialization to prepare
+ * data structures needed for ttm base and ref objects.
+ */
+
+extern struct ttm_object_device *ttm_object_device_init
+    (struct ttm_mem_global *mem_glob, unsigned int hash_order);
+
+/**
+ * ttm_object_device_release - release data held by a ttm_object_device
+ *
+ * @p_tdev: Pointer to pointer to the ttm_object_device object to release.
+ * *p_tdev will be set to NULL by this function.
+ *
+ * Releases all data associated by a ttm_object_device.
+ * Typically called from driver::unload before the destruction of the
+ * device private data structure.
+ */
+
+extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
+
+#endif
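
A hedged sketch of exporting a driver object to user-space with these interfaces: embed a ttm_base_object, initialize it shareable, then take a TTM_REF_USAGE reference for the requesting client. The payload struct and release callback are illustrative only:

struct example_obj {
	struct ttm_base_object base;
	/* driver-specific payload would follow here */
};

static void example_release(struct ttm_base_object **p_base)
{
	struct example_obj *obj =
	    container_of(*p_base, struct example_obj, base);

	*p_base = NULL;		/* required by the refcount_release contract */
	kfree(obj);
}

static int example_export(struct ttm_object_file *tfile,
			  struct example_obj *obj)
{
	bool existed;
	int ret;

	ret = ttm_base_object_init(tfile, &obj->base, true /* shareable */,
				   ttm_driver_type0, &example_release, NULL);
	if (ret)
		return ret;

	/* Hold a usage reference for the client; it is dropped
	 * automatically when the client's file is closed. */
	return ttm_ref_object_add(tfile, &obj->base, TTM_REF_USAGE, &existed);
}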
diff --git a/include/drm/via_drm.h b/include/drm/via_drm.h
index 170786e..fd11a5b 100644
--- a/include/drm/via_drm.h
+++ b/include/drm/via_drm.h
@@ -24,7 +24,7 @@
 #ifndef _VIA_DRM_H_
 #define _VIA_DRM_H_
 
-#include <linux/types.h>
+#include "drm.h"
 
 /* WARNING: These defines must be the same as what the Xserver uses.
  * if you change them, you must change the defines in the Xserver.
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index ce52040..3fe02cf 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -151,6 +151,8 @@
 #define NILFS_MOUNT_BARRIER		0x1000  /* Use block barriers */
 #define NILFS_MOUNT_STRICT_ORDER	0x2000  /* Apply strict in-order
 						   semantics also for data */
+#define NILFS_MOUNT_NORECOVERY		0x4000  /* Disable write access during
+						   mount-time recovery */
 
 
 /**
@@ -403,6 +405,28 @@
 #define NILFS_SS_GC     0x0010  /* segment written for cleaner operation */
 
 /**
+ * struct nilfs_btree_node - B-tree node
+ * @bn_flags: flags
+ * @bn_level: level
+ * @bn_nchildren: number of children
+ * @bn_pad: padding
+ */
+struct nilfs_btree_node {
+	__u8 bn_flags;
+	__u8 bn_level;
+	__le16 bn_nchildren;
+	__le32 bn_pad;
+};
+
+/* flags */
+#define NILFS_BTREE_NODE_ROOT   0x01
+
+/* level */
+#define NILFS_BTREE_LEVEL_DATA          0
+#define NILFS_BTREE_LEVEL_NODE_MIN      (NILFS_BTREE_LEVEL_DATA + 1)
+#define NILFS_BTREE_LEVEL_MAX           14
+
+/**
  * struct nilfs_palloc_group_desc - block group descriptor
  * @pg_nfrees: number of free entries in block group
  */
diff --git a/include/linux/omapfb.h b/include/linux/omapfb.h
new file mode 100644
index 0000000..f46c40a
--- /dev/null
+++ b/include/linux/omapfb.h
@@ -0,0 +1,251 @@
+/*
+ * File: include/linux/omapfb.h
+ *
+ * Framebuffer driver for TI OMAP boards
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#ifndef __LINUX_OMAPFB_H__
+#define __LINUX_OMAPFB_H__
+
+#include <linux/fb.h>
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/* IOCTL commands. */
+
+#define OMAP_IOW(num, dtype)	_IOW('O', num, dtype)
+#define OMAP_IOR(num, dtype)	_IOR('O', num, dtype)
+#define OMAP_IOWR(num, dtype)	_IOWR('O', num, dtype)
+#define OMAP_IO(num)		_IO('O', num)
+
+#define OMAPFB_MIRROR		OMAP_IOW(31, int)
+#define OMAPFB_SYNC_GFX		OMAP_IO(37)
+#define OMAPFB_VSYNC		OMAP_IO(38)
+#define OMAPFB_SET_UPDATE_MODE	OMAP_IOW(40, int)
+#define OMAPFB_GET_CAPS		OMAP_IOR(42, struct omapfb_caps)
+#define OMAPFB_GET_UPDATE_MODE	OMAP_IOW(43, int)
+#define OMAPFB_LCD_TEST		OMAP_IOW(45, int)
+#define OMAPFB_CTRL_TEST	OMAP_IOW(46, int)
+#define OMAPFB_UPDATE_WINDOW_OLD OMAP_IOW(47, struct omapfb_update_window_old)
+#define OMAPFB_SET_COLOR_KEY	OMAP_IOW(50, struct omapfb_color_key)
+#define OMAPFB_GET_COLOR_KEY	OMAP_IOW(51, struct omapfb_color_key)
+#define OMAPFB_SETUP_PLANE	OMAP_IOW(52, struct omapfb_plane_info)
+#define OMAPFB_QUERY_PLANE	OMAP_IOW(53, struct omapfb_plane_info)
+#define OMAPFB_UPDATE_WINDOW	OMAP_IOW(54, struct omapfb_update_window)
+#define OMAPFB_SETUP_MEM	OMAP_IOW(55, struct omapfb_mem_info)
+#define OMAPFB_QUERY_MEM	OMAP_IOW(56, struct omapfb_mem_info)
+#define OMAPFB_WAITFORVSYNC	OMAP_IO(57)
+#define OMAPFB_MEMORY_READ	OMAP_IOR(58, struct omapfb_memory_read)
+#define OMAPFB_GET_OVERLAY_COLORMODE OMAP_IOR(59, struct omapfb_ovl_colormode)
+#define OMAPFB_WAITFORGO	OMAP_IO(60)
+#define OMAPFB_GET_VRAM_INFO	OMAP_IOR(61, struct omapfb_vram_info)
+#define OMAPFB_SET_TEARSYNC	OMAP_IOW(62, struct omapfb_tearsync_info)
+
+#define OMAPFB_CAPS_GENERIC_MASK	0x00000fff
+#define OMAPFB_CAPS_LCDC_MASK		0x00fff000
+#define OMAPFB_CAPS_PANEL_MASK		0xff000000
+
+#define OMAPFB_CAPS_MANUAL_UPDATE	0x00001000
+#define OMAPFB_CAPS_TEARSYNC		0x00002000
+#define OMAPFB_CAPS_PLANE_RELOCATE_MEM	0x00004000
+#define OMAPFB_CAPS_PLANE_SCALE		0x00008000
+#define OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE	0x00010000
+#define OMAPFB_CAPS_WINDOW_SCALE	0x00020000
+#define OMAPFB_CAPS_WINDOW_OVERLAY	0x00040000
+#define OMAPFB_CAPS_WINDOW_ROTATE	0x00080000
+#define OMAPFB_CAPS_SET_BACKLIGHT	0x01000000
+
+/* Values from DSP must map to lower 16-bits */
+#define OMAPFB_FORMAT_MASK		0x00ff
+#define OMAPFB_FORMAT_FLAG_DOUBLE	0x0100
+#define OMAPFB_FORMAT_FLAG_TEARSYNC	0x0200
+#define OMAPFB_FORMAT_FLAG_FORCE_VSYNC	0x0400
+#define OMAPFB_FORMAT_FLAG_ENABLE_OVERLAY	0x0800
+#define OMAPFB_FORMAT_FLAG_DISABLE_OVERLAY	0x1000
+
+#define OMAPFB_MEMTYPE_SDRAM		0
+#define OMAPFB_MEMTYPE_SRAM		1
+#define OMAPFB_MEMTYPE_MAX		1
+
+enum omapfb_color_format {
+	OMAPFB_COLOR_RGB565 = 0,
+	OMAPFB_COLOR_YUV422,
+	OMAPFB_COLOR_YUV420,
+	OMAPFB_COLOR_CLUT_8BPP,
+	OMAPFB_COLOR_CLUT_4BPP,
+	OMAPFB_COLOR_CLUT_2BPP,
+	OMAPFB_COLOR_CLUT_1BPP,
+	OMAPFB_COLOR_RGB444,
+	OMAPFB_COLOR_YUY422,
+
+	OMAPFB_COLOR_ARGB16,
+	OMAPFB_COLOR_RGB24U,	/* RGB24, 32-bit container */
+	OMAPFB_COLOR_RGB24P,	/* RGB24, 24-bit container */
+	OMAPFB_COLOR_ARGB32,
+	OMAPFB_COLOR_RGBA32,
+	OMAPFB_COLOR_RGBX32,
+};
+
+struct omapfb_update_window {
+	__u32 x, y;
+	__u32 width, height;
+	__u32 format;
+	__u32 out_x, out_y;
+	__u32 out_width, out_height;
+	__u32 reserved[8];
+};
+
+struct omapfb_update_window_old {
+	__u32 x, y;
+	__u32 width, height;
+	__u32 format;
+};
+
+enum omapfb_plane {
+	OMAPFB_PLANE_GFX = 0,
+	OMAPFB_PLANE_VID1,
+	OMAPFB_PLANE_VID2,
+};
+
+enum omapfb_channel_out {
+	OMAPFB_CHANNEL_OUT_LCD = 0,
+	OMAPFB_CHANNEL_OUT_DIGIT,
+};
+
+struct omapfb_plane_info {
+	__u32 pos_x;
+	__u32 pos_y;
+	__u8  enabled;
+	__u8  channel_out;
+	__u8  mirror;
+	__u8  reserved1;
+	__u32 out_width;
+	__u32 out_height;
+	__u32 reserved2[12];
+};
+
+struct omapfb_mem_info {
+	__u32 size;
+	__u8  type;
+	__u8  reserved[3];
+};
+
+struct omapfb_caps {
+	__u32 ctrl;
+	__u32 plane_color;
+	__u32 wnd_color;
+};
+
+enum omapfb_color_key_type {
+	OMAPFB_COLOR_KEY_DISABLED = 0,
+	OMAPFB_COLOR_KEY_GFX_DST,
+	OMAPFB_COLOR_KEY_VID_SRC,
+};
+
+struct omapfb_color_key {
+	__u8  channel_out;
+	__u32 background;
+	__u32 trans_key;
+	__u8  key_type;
+};
+
+enum omapfb_update_mode {
+	OMAPFB_UPDATE_DISABLED = 0,
+	OMAPFB_AUTO_UPDATE,
+	OMAPFB_MANUAL_UPDATE
+};
+
+struct omapfb_memory_read {
+	__u16 x;
+	__u16 y;
+	__u16 w;
+	__u16 h;
+	size_t buffer_size;
+	void __user *buffer;
+};
+
+struct omapfb_ovl_colormode {
+	__u8 overlay_idx;
+	__u8 mode_idx;
+	__u32 bits_per_pixel;
+	__u32 nonstd;
+	struct fb_bitfield red;
+	struct fb_bitfield green;
+	struct fb_bitfield blue;
+	struct fb_bitfield transp;
+};
+
+struct omapfb_vram_info {
+	__u32 total;
+	__u32 free;
+	__u32 largest_free_block;
+	__u32 reserved[5];
+};
+
+struct omapfb_tearsync_info {
+	__u8 enabled;
+	__u8 reserved1[3];
+	__u16 line;
+	__u16 reserved2;
+};
+
+#ifdef __KERNEL__
+
+#include <plat/board.h>
+
+#ifdef CONFIG_ARCH_OMAP1
+#define OMAPFB_PLANE_NUM		1
+#else
+#define OMAPFB_PLANE_NUM		3
+#endif
+
+struct omapfb_mem_region {
+	u32		paddr;
+	void __iomem	*vaddr;
+	unsigned long	size;
+	u8		type;		/* OMAPFB_PLANE_MEM_* */
+	enum omapfb_color_format format;/* OMAPFB_COLOR_* */
+	unsigned	format_used:1;	/* Must be set when format is set.
+					 * Needed because of the badly chosen 0
+					 * base for OMAPFB_COLOR_* values
+					 */
+	unsigned	alloc:1;	/* allocated by the driver */
+	unsigned	map:1;		/* kernel mapped by the driver */
+};
+
+struct omapfb_mem_desc {
+	int				region_cnt;
+	struct omapfb_mem_region	region[OMAPFB_PLANE_NUM];
+};
+
+struct omapfb_platform_data {
+	struct omap_lcd_config		lcd;
+	struct omapfb_mem_desc		mem_desc;
+	void				*ctrl_platform_data;
+};
+
+/* in arch/arm/plat-omap/fb.c */
+extern void omapfb_set_platform_data(struct omapfb_platform_data *data);
+extern void omapfb_set_ctrl_platform_data(void *pdata);
+extern void omapfb_reserve_sdram(void);
+
+#endif
+
+#endif /* __LINUX_OMAPFB_H__ */
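
A hedged user-space sketch of driving the ioctl interface above;
/dev/fb0 and the plane settings are assumptions, the ioctl names and
structures are the ones defined in this header:

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/omapfb.h>

	int main(void)
	{
		struct omapfb_plane_info pi;
		int fd = open("/dev/fb0", O_RDWR);	/* assumed fb node */

		if (fd < 0)
			return 1;
		memset(&pi, 0, sizeof(pi));
		if (ioctl(fd, OMAPFB_QUERY_PLANE, &pi) == 0) {
			pi.pos_x = 0;
			pi.pos_y = 0;
			pi.enabled = 1;
			ioctl(fd, OMAPFB_SETUP_PLANE, &pi);
		}
		ioctl(fd, OMAPFB_WAITFORVSYNC);
		return 0;
	}
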
diff --git a/include/linux/pci.h b/include/linux/pci.h
index f5c7cd3..04771b9 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -218,6 +218,7 @@
 	unsigned int	class;		/* 3 bytes: (base,sub,prog-if) */
 	u8		revision;	/* PCI revision, low byte of class word */
 	u8		hdr_type;	/* PCI header type (`multi' flag masked out) */
+	u8		pcie_cap;	/* PCI-E capability offset */
 	u8		pcie_type;	/* PCI-E device/port type */
 	u8		rom_base_reg;	/* which config register controls the ROM */
 	u8		pin;  		/* which interrupt pin this device uses */
@@ -280,6 +281,7 @@
 	unsigned int	is_virtfn:1;
 	unsigned int	reset_fn:1;
 	unsigned int    is_hotplug_bridge:1;
+	unsigned int    aer_firmware_first:1;
 	pci_dev_flags_t dev_flags;
 	atomic_t	enable_cnt;	/* pci_enable_device has been called */
 
@@ -635,7 +637,13 @@
 				unsigned int ss_vendor, unsigned int ss_device,
 				struct pci_dev *from);
 struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
-struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn);
+struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
+					    unsigned int devfn);
+static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
+						   unsigned int devfn)
+{
+	return pci_get_domain_bus_and_slot(0, bus, devfn);
+}
 struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
 int pci_dev_present(const struct pci_device_id *ids);
 
@@ -701,6 +709,7 @@
 void pci_set_master(struct pci_dev *dev);
 void pci_clear_master(struct pci_dev *dev);
 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
+int pci_set_cacheline_size(struct pci_dev *dev);
 #define HAVE_PCI_SET_MWI
 int __must_check pci_set_mwi(struct pci_dev *dev);
 int pci_try_set_mwi(struct pci_dev *dev);
@@ -1246,6 +1255,8 @@
 
 extern unsigned long pci_cardbus_io_size;
 extern unsigned long pci_cardbus_mem_size;
+extern u8 pci_dfl_cache_line_size;
+extern u8 pci_cache_line_size;
 
 extern unsigned long pci_hotplug_io_size;
 extern unsigned long pci_hotplug_mem_size;
@@ -1290,5 +1301,34 @@
 extern void pci_hp_remove_module_link(struct pci_slot *pci_slot);
 #endif
 
+/**
+ * pci_pcie_cap - get the saved PCIe capability offset
+ * @dev: PCI device
+ *
+ * PCIe capability offset is calculated at PCI device initialization
+ * time and saved in the data structure. This function returns saved
+ * PCIe capability offset. Using this instead of pci_find_capability()
+ * reduces unnecessary search in the PCI configuration space. If you
+ * need to calculate PCIe capability offset from raw device for some
+ * reasons, please use pci_find_capability() instead.
+ */
+static inline int pci_pcie_cap(struct pci_dev *dev)
+{
+	return dev->pcie_cap;
+}
+
+/**
+ * pci_is_pcie - check if the PCI device is PCI Express capable
+ * @dev: PCI device
+ *
+ * Return true if the PCI device is PCI Express capable, false otherwise.
+ */
+static inline bool pci_is_pcie(struct pci_dev *dev)
+{
+	return !!pci_pcie_cap(dev);
+}
+
+void pci_request_acs(void);
+
 #endif /* __KERNEL__ */
 #endif /* LINUX_PCI_H */
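
A short sketch of the intended usage, replacing a pci_find_capability()
walk with the cached offset (the register read is only an example, dev
is assumed to be a valid struct pci_dev pointer):

	u16 flags;

	if (pci_is_pcie(dev)) {
		/* cached offset, no capability-list walk */
		int pos = pci_pcie_cap(dev);

		pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
	}
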
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index dd0bed4..9f2ad0a 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -365,6 +365,11 @@
 #define  PCI_X_STATUS_266MHZ	0x40000000	/* 266 MHz capable */
 #define  PCI_X_STATUS_533MHZ	0x80000000	/* 533 MHz capable */
 
+/* PCI Bridge Subsystem ID registers */
+
+#define PCI_SSVID_VENDOR_ID     4	/* PCI-Bridge subsystem vendor id register */
+#define PCI_SSVID_DEVICE_ID     6	/* PCI-Bridge subsystem device id register */
+
 /* PCI Express capability registers */
 
 #define PCI_EXP_FLAGS		2	/* Capabilities register */
@@ -502,6 +507,7 @@
 #define PCI_EXT_CAP_ID_VC	2
 #define PCI_EXT_CAP_ID_DSN	3
 #define PCI_EXT_CAP_ID_PWR	4
+#define PCI_EXT_CAP_ID_ACS	13
 #define PCI_EXT_CAP_ID_ARI	14
 #define PCI_EXT_CAP_ID_ATS	15
 #define PCI_EXT_CAP_ID_SRIOV	16
@@ -662,4 +668,16 @@
 #define  PCI_SRIOV_VFM_MO	0x2	/* Active.MigrateOut */
 #define  PCI_SRIOV_VFM_AV	0x3	/* Active.Available */
 
+/* Access Control Service */
+#define PCI_ACS_CAP		0x04	/* ACS Capability Register */
+#define  PCI_ACS_SV		0x01	/* Source Validation */
+#define  PCI_ACS_TB		0x02	/* Translation Blocking */
+#define  PCI_ACS_RR		0x04	/* P2P Request Redirect */
+#define  PCI_ACS_CR		0x08	/* P2P Completion Redirect */
+#define  PCI_ACS_UF		0x10	/* Upstream Forwarding */
+#define  PCI_ACS_EC		0x20	/* P2P Egress Control */
+#define  PCI_ACS_DT		0x40	/* Direct Translated P2P */
+#define PCI_ACS_CTRL		0x06	/* ACS Control Register */
+#define PCI_ACS_EGRESS_CTL_V	0x08	/* ACS Egress Control Vector */
+
 #endif /* LINUX_PCI_REGS_H */
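
A hedged example of how the new definitions combine, enabling two ACS
features on a device (error handling elided, dev assumed valid):

	u16 ctrl;
	int pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);

	if (pos) {
		pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
		/* turn on source validation and request redirect */
		ctrl |= PCI_ACS_SV | PCI_ACS_RR;
		pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
	}
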
diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h
index b4c7954..6775532 100644
--- a/include/linux/pcieport_if.h
+++ b/include/linux/pcieport_if.h
@@ -10,10 +10,7 @@
 #define _PCIEPORT_IF_H_
 
 /* Port Type */
-#define PCIE_RC_PORT			4	/* Root port of RC */
-#define PCIE_SW_UPSTREAM_PORT		5	/* Upstream port of Switch */
-#define PCIE_SW_DOWNSTREAM_PORT		6	/* Downstream port of Switch */
-#define PCIE_ANY_PORT			7
+#define PCIE_ANY_PORT			(~0)
 
 /* Service Type */
 #define PCIE_PORT_SERVICE_PME_SHIFT	0	/* Power Management Event */
@@ -25,17 +22,6 @@
 #define PCIE_PORT_SERVICE_VC_SHIFT	3	/* Virtual Channel */
 #define PCIE_PORT_SERVICE_VC		(1 << PCIE_PORT_SERVICE_VC_SHIFT)
 
-/* Root/Upstream/Downstream Port's Interrupt Mode */
-#define PCIE_PORT_NO_IRQ		(-1)
-#define PCIE_PORT_INTx_MODE		0
-#define PCIE_PORT_MSI_MODE		1
-#define PCIE_PORT_MSIX_MODE		2
-
-struct pcie_port_data {
-	int port_type;		/* Type of the port */
-	int port_irq_mode;	/* [0:INTx | 1:MSI | 2:MSI-X] */
-};
-
 struct pcie_device {
 	int 		irq;	    /* Service IRQ/MSI/MSI-X Vector */
 	struct pci_dev *port;	    /* Root/Upstream/Downstream Port */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index bc70c58..939a615 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -834,4 +834,8 @@
 asmlinkage long sys_perf_event_open(
 		struct perf_event_attr __user *attr_uptr,
 		pid_t pid, int cpu, int group_fd, unsigned long flags);
+
+asmlinkage long sys_mmap_pgoff(unsigned long addr, unsigned long len,
+			unsigned long prot, unsigned long flags,
+			unsigned long fd, unsigned long pgoff);
 #endif
diff --git a/include/linux/usb.h b/include/linux/usb.h
index a34fa89..e101a2d 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -331,6 +331,7 @@
 	u8 otg_port;			/* 0, or number of OTG/HNP port */
 	unsigned is_b_host:1;		/* true during some HNP roleswitches */
 	unsigned b_hnp_enable:1;	/* OTG: did A-Host enable HNP? */
+	unsigned sg_tablesize;		/* 0 or largest number of sg list entries */
 
 	int devnum_next;		/* Next open device number in
 					 * round-robin allocation */
@@ -428,11 +429,9 @@
  * @last_busy: time of last use
  * @autosuspend_delay: in jiffies
  * @connect_time: time device was first connected
- * @auto_pm: autosuspend/resume in progress
  * @do_remote_wakeup:  remote wakeup should be enabled
  * @reset_resume: needs reset instead of resume
  * @autosuspend_disabled: autosuspend disabled by the user
- * @autoresume_disabled: autoresume disabled by the user
  * @skip_sys_resume: skip the next system resume
  * @wusb_dev: if this is a Wireless USB device, link to the WUSB
  *	specific data for the device.
@@ -513,11 +512,9 @@
 	int autosuspend_delay;
 	unsigned long connect_time;
 
-	unsigned auto_pm:1;
 	unsigned do_remote_wakeup:1;
 	unsigned reset_resume:1;
 	unsigned autosuspend_disabled:1;
-	unsigned autoresume_disabled:1;
 	unsigned skip_sys_resume:1;
 #endif
 	struct wusb_dev *wusb_dev;
@@ -543,22 +540,20 @@
 
 /* USB autosuspend and autoresume */
 #ifdef CONFIG_USB_SUSPEND
-extern int usb_autopm_set_interface(struct usb_interface *intf);
 extern int usb_autopm_get_interface(struct usb_interface *intf);
 extern void usb_autopm_put_interface(struct usb_interface *intf);
 extern int usb_autopm_get_interface_async(struct usb_interface *intf);
 extern void usb_autopm_put_interface_async(struct usb_interface *intf);
 
-static inline void usb_autopm_enable(struct usb_interface *intf)
+static inline void usb_autopm_get_interface_no_resume(
+		struct usb_interface *intf)
 {
-	atomic_set(&intf->pm_usage_cnt, 0);
-	usb_autopm_set_interface(intf);
+	atomic_inc(&intf->pm_usage_cnt);
 }
-
-static inline void usb_autopm_disable(struct usb_interface *intf)
+static inline void usb_autopm_put_interface_no_suspend(
+		struct usb_interface *intf)
 {
-	atomic_set(&intf->pm_usage_cnt, 1);
-	usb_autopm_set_interface(intf);
+	atomic_dec(&intf->pm_usage_cnt);
 }
 
 static inline void usb_mark_last_busy(struct usb_device *udev)
@@ -568,12 +563,8 @@
 
 #else
 
-static inline int usb_autopm_set_interface(struct usb_interface *intf)
-{ return 0; }
-
 static inline int usb_autopm_get_interface(struct usb_interface *intf)
 { return 0; }
-
 static inline int usb_autopm_get_interface_async(struct usb_interface *intf)
 { return 0; }
 
@@ -581,9 +572,11 @@
 { }
 static inline void usb_autopm_put_interface_async(struct usb_interface *intf)
 { }
-static inline void usb_autopm_enable(struct usb_interface *intf)
+static inline void usb_autopm_get_interface_no_resume(
+		struct usb_interface *intf)
 { }
-static inline void usb_autopm_disable(struct usb_interface *intf)
+static inline void usb_autopm_put_interface_no_suspend(
+		struct usb_interface *intf)
 { }
 static inline void usb_mark_last_busy(struct usb_device *udev)
 { }
@@ -626,6 +619,10 @@
 		unsigned ifnum);
 extern struct usb_host_interface *usb_altnum_to_altsetting(
 		const struct usb_interface *intf, unsigned int altnum);
+extern struct usb_host_interface *usb_find_alt_setting(
+		struct usb_host_config *config,
+		unsigned int iface_num,
+		unsigned int alt_num);
 
 
 /**
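
A hedged sketch of how a driver might use the renamed helpers around
work that must not trigger a resume or suspend (intf is assumed to be a
bound struct usb_interface):

	/* take a PM reference without waking the device */
	usb_autopm_get_interface_no_resume(intf);
	/* ... adjust driver state, queue work, etc. ... */
	usb_autopm_put_interface_no_suspend(intf);
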
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 4f6bb3d..738ea1a 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -127,6 +127,7 @@
 	/* private: */
 	/* internals */
 	struct list_head		list;
+	DECLARE_BITMAP(endpoints, 32);
 };
 
 int usb_add_function(struct usb_configuration *, struct usb_function *);
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index 2443c0e..52bb917 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -33,6 +33,23 @@
 	OTG_STATE_A_VBUS_ERR,
 };
 
+#define USB_OTG_PULLUP_ID		(1 << 0)
+#define USB_OTG_PULLDOWN_DP		(1 << 1)
+#define USB_OTG_PULLDOWN_DM		(1 << 2)
+#define USB_OTG_EXT_VBUS_INDICATOR	(1 << 3)
+#define USB_OTG_DRV_VBUS		(1 << 4)
+#define USB_OTG_DRV_VBUS_EXT		(1 << 5)
+
+struct otg_transceiver;
+
+/* for transceivers connected through a ULPI interface, the user must
+ * provide access ops
+ */
+struct otg_io_access_ops {
+	int (*read)(struct otg_transceiver *otg, u32 reg);
+	int (*write)(struct otg_transceiver *otg, u32 val, u32 reg);
+};
+
 /*
  * the otg driver needs to interact with both device side and host side
  * usb controllers.  it decides which controller is active at a given
@@ -42,6 +59,7 @@
 struct otg_transceiver {
 	struct device		*dev;
 	const char		*label;
+	unsigned int		 flags;
 
 	u8			default_a;
 	enum usb_otg_state	state;
@@ -49,10 +67,17 @@
 	struct usb_bus		*host;
 	struct usb_gadget	*gadget;
 
+	struct otg_io_access_ops	*io_ops;
+	void __iomem			*io_priv;
+
 	/* to pass extra port status to the root hub */
 	u16			port_status;
 	u16			port_change;
 
+	/* initialize/shutdown the OTG controller */
+	int	(*init)(struct otg_transceiver *otg);
+	void	(*shutdown)(struct otg_transceiver *otg);
+
 	/* bind/unbind the host controller */
 	int	(*set_host)(struct otg_transceiver *otg,
 				struct usb_bus *host);
@@ -65,6 +90,10 @@
 	int	(*set_power)(struct otg_transceiver *otg,
 				unsigned mA);
 
+	/* effective for A-peripheral, ignored for B devices */
+	int	(*set_vbus)(struct otg_transceiver *otg,
+				bool enabled);
+
 	/* for non-OTG B devices: set transceiver into suspend mode */
 	int	(*set_suspend)(struct otg_transceiver *otg,
 				int suspend);
@@ -85,6 +114,38 @@
 extern void usb_nop_xceiv_register(void);
 extern void usb_nop_xceiv_unregister(void);
 
+/* helpers for direct access through the low-level I/O interface */
+static inline int otg_io_read(struct otg_transceiver *otg, u32 reg)
+{
+	if (otg->io_ops && otg->io_ops->read)
+		return otg->io_ops->read(otg, reg);
+
+	return -EINVAL;
+}
+
+static inline int otg_io_write(struct otg_transceiver *otg, u32 reg, u32 val)
+{
+	if (otg->io_ops && otg->io_ops->write)
+		return otg->io_ops->write(otg, reg, val);
+
+	return -EINVAL;
+}
+
+static inline int
+otg_init(struct otg_transceiver *otg)
+{
+	if (otg->init)
+		return otg->init(otg);
+
+	return 0;
+}
+
+static inline void
+otg_shutdown(struct otg_transceiver *otg)
+{
+	if (otg->shutdown)
+		otg->shutdown(otg);
+}
 
 /* for usb host and peripheral controller drivers */
 extern struct otg_transceiver *otg_get_transceiver(void);
@@ -97,6 +158,12 @@
 	return otg->start_hnp(otg);
 }
 
+/* Context: can sleep */
+static inline int
+otg_set_vbus(struct otg_transceiver *otg, bool enabled)
+{
+	return otg->set_vbus(otg, enabled);
+}
 
 /* for HCDs */
 static inline int
@@ -105,7 +172,6 @@
 	return otg->set_host(otg, host);
 }
 
-
 /* for usb peripheral controller drivers */
 
 /* Context: can sleep */
diff --git a/include/linux/usb/ulpi.h b/include/linux/usb/ulpi.h
new file mode 100644
index 0000000..20675c6
--- /dev/null
+++ b/include/linux/usb/ulpi.h
@@ -0,0 +1,7 @@
+#ifndef __LINUX_USB_ULPI_H
+#define __LINUX_USB_ULPI_H
+
+struct otg_transceiver *otg_ulpi_create(struct otg_io_access_ops *ops,
+					unsigned int flags);
+
+#endif /* __LINUX_USB_ULPI_H */
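
A hedged sketch of wiring up a ULPI transceiver: the register accessors
and foo_* names are platform-specific stand-ins, only otg_io_access_ops,
otg_ulpi_create() and the USB_OTG_* flags come from the headers above:

	static int foo_ulpi_read(struct otg_transceiver *otg, u32 reg)
	{
		return readl(otg->io_priv + reg);
	}

	static int foo_ulpi_write(struct otg_transceiver *otg, u32 val, u32 reg)
	{
		writel(val, otg->io_priv + reg);
		return 0;
	}

	static struct otg_io_access_ops foo_ulpi_ops = {
		.read	= foo_ulpi_read,
		.write	= foo_ulpi_write,
	};

	static int foo_probe(void __iomem *mmio_base)
	{
		struct otg_transceiver *otg;

		otg = otg_ulpi_create(&foo_ulpi_ops, USB_OTG_DRV_VBUS);
		if (!otg)
			return -ENOMEM;
		otg->io_priv = mmio_base;	/* assumed ioremap()ed base */
		return otg_init(otg);
	}
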
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 3d15fb9..a4b947e 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -56,7 +56,9 @@
 	US_FLAG(SANE_SENSE,     0x00008000)			\
 		/* Sane Sense (> 18 bytes) */			\
 	US_FLAG(CAPACITY_OK,	0x00010000)			\
-		/* READ CAPACITY response is correct */
+		/* READ CAPACITY response is correct */		\
+	US_FLAG(BAD_SENSE,	0x00020000)			\
+		/* Bad Sense (never more than 18 bytes) */
 
 #define US_FLAG(name, value)	US_FL_##name = value ,
 enum { US_DO_ALL_FLAGS };
diff --git a/include/xen/xen.h b/include/xen/xen.h
new file mode 100644
index 0000000..a164024
--- /dev/null
+++ b/include/xen/xen.h
@@ -0,0 +1,32 @@
+#ifndef _XEN_XEN_H
+#define _XEN_XEN_H
+
+enum xen_domain_type {
+	XEN_NATIVE,		/* running on bare hardware    */
+	XEN_PV_DOMAIN,		/* running in a PV domain      */
+	XEN_HVM_DOMAIN,		/* running in a Xen hvm domain */
+};
+
+#ifdef CONFIG_XEN
+extern enum xen_domain_type xen_domain_type;
+#else
+#define xen_domain_type		XEN_NATIVE
+#endif
+
+#define xen_domain()		(xen_domain_type != XEN_NATIVE)
+#define xen_pv_domain()		(xen_domain() &&			\
+				 xen_domain_type == XEN_PV_DOMAIN)
+#define xen_hvm_domain()	(xen_domain() &&			\
+				 xen_domain_type == XEN_HVM_DOMAIN)
+
+#ifdef CONFIG_XEN_DOM0
+#include <xen/interface/xen.h>
+#include <asm/xen/hypervisor.h>
+
+#define xen_initial_domain()	(xen_pv_domain() && \
+				 xen_start_info->flags & SIF_INITDOMAIN)
+#else  /* !CONFIG_XEN_DOM0 */
+#define xen_initial_domain()	(0)
+#endif	/* CONFIG_XEN_DOM0 */
+
+#endif	/* _XEN_XEN_H */
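
Typical consumption of these predicates is a guard in init code; a
minimal sketch:

	static int __init foo_init(void)
	{
		if (!xen_domain())
			return -ENODEV;	/* not running under Xen */
		if (xen_initial_domain())
			pr_info("foo: running as dom0\n");
		return 0;
	}
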
diff --git a/ipc/shm.c b/ipc/shm.c
index 464694e..11bec62 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -290,28 +290,28 @@
 	unsigned long flags)
 {
 	struct shm_file_data *sfd = shm_file_data(file);
-	return get_unmapped_area(sfd->file, addr, len, pgoff, flags);
-}
-
-int is_file_shm_hugepages(struct file *file)
-{
-	int ret = 0;
-
-	if (file->f_op == &shm_file_operations) {
-		struct shm_file_data *sfd;
-		sfd = shm_file_data(file);
-		ret = is_file_hugepages(sfd->file);
-	}
-	return ret;
+	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
+						pgoff, flags);
 }
 
 static const struct file_operations shm_file_operations = {
 	.mmap		= shm_mmap,
 	.fsync		= shm_fsync,
 	.release	= shm_release,
+};
+
+static const struct file_operations shm_file_operations_huge = {
+	.mmap		= shm_mmap,
+	.fsync		= shm_fsync,
+	.release	= shm_release,
 	.get_unmapped_area	= shm_get_unmapped_area,
 };
 
+int is_file_shm_hugepages(struct file *file)
+{
+	return file->f_op == &shm_file_operations_huge;
+}
+
 static const struct vm_operations_struct shm_vm_ops = {
 	.open	= shm_open,	/* callback for a new vm-area open */
 	.close	= shm_close,	/* callback for when the vm-area is released */
@@ -889,7 +889,10 @@
 	if (!sfd)
 		goto out_put_dentry;
 
-	file = alloc_file(path.mnt, path.dentry, f_mode, &shm_file_operations);
+	file = alloc_file(path.mnt, path.dentry, f_mode,
+			is_file_hugepages(shp->shm_file) ?
+				&shm_file_operations_huge :
+				&shm_file_operations);
 	if (!file)
 		goto out_free;
 	ima_counts_get(file);
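
The split only matters for segments created with SHM_HUGETLB; from user
space that path might be exercised like this (sizes illustrative; on
older libcs SHM_HUGETLB may have to come from <linux/shm.h>):

	#include <sys/ipc.h>
	#include <sys/shm.h>

	int main(void)
	{
		/* hugepage-backed segment; needs reserved hugepages */
		int id = shmget(IPC_PRIVATE, 4UL * 1024 * 1024,
				SHM_HUGETLB | IPC_CREAT | 0600);
		void *p;

		if (id < 0)
			return 1;
		p = shmat(id, NULL, 0);
		if (p != (void *)-1)
			shmdt(p);	/* mapping used shm_file_operations_huge */
		shmctl(id, IPC_RMID, NULL);
		return 0;
	}
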
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 7d70146..2eb517e 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -129,6 +129,7 @@
 struct task_struct		*kgdb_contthread;
 
 int				kgdb_single_step;
+pid_t				kgdb_sstep_pid;
 
 /* Our I/O buffers. */
 static char			remcom_in_buffer[BUFMAX];
@@ -541,12 +542,17 @@
 	 */
 	if (tid == 0 || tid == -1)
 		tid = -atomic_read(&kgdb_active) - 2;
-	if (tid < 0) {
+	if (tid < -1 && tid > -NR_CPUS - 2) {
 		if (kgdb_info[-tid - 2].task)
 			return kgdb_info[-tid - 2].task;
 		else
 			return idle_task(-tid - 2);
 	}
+	if (tid <= 0) {
+		printk(KERN_ERR "KGDB: Internal thread select error\n");
+		dump_stack();
+		return NULL;
+	}
 
 	/*
 	 * find_task_by_pid_ns() does not take the tasklist lock anymore
@@ -619,7 +625,8 @@
 static int kgdb_activate_sw_breakpoints(void)
 {
 	unsigned long addr;
-	int error = 0;
+	int error;
+	int ret = 0;
 	int i;
 
 	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
@@ -629,13 +636,16 @@
 		addr = kgdb_break[i].bpt_addr;
 		error = kgdb_arch_set_breakpoint(addr,
 				kgdb_break[i].saved_instr);
-		if (error)
-			return error;
+		if (error) {
+			ret = error;
+			printk(KERN_INFO "KGDB: BP install failed: %lx\n", addr);
+			continue;
+		}
 
 		kgdb_flush_swbreak_addr(addr);
 		kgdb_break[i].state = BP_ACTIVE;
 	}
-	return 0;
+	return ret;
 }
 
 static int kgdb_set_sw_break(unsigned long addr)
@@ -682,7 +692,8 @@
 static int kgdb_deactivate_sw_breakpoints(void)
 {
 	unsigned long addr;
-	int error = 0;
+	int error;
+	int ret = 0;
 	int i;
 
 	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
@@ -691,13 +702,15 @@
 		addr = kgdb_break[i].bpt_addr;
 		error = kgdb_arch_remove_breakpoint(addr,
 					kgdb_break[i].saved_instr);
-		if (error)
-			return error;
+		if (error) {
+			printk(KERN_INFO "KGDB: BP remove failed: %lx\n", addr);
+			ret = error;
+		}
 
 		kgdb_flush_swbreak_addr(addr);
 		kgdb_break[i].state = BP_SET;
 	}
-	return 0;
+	return ret;
 }
 
 static int kgdb_remove_sw_break(unsigned long addr)
@@ -1204,8 +1217,10 @@
 		return 1;
 
 	} else {
-		error_packet(remcom_out_buffer, -EINVAL);
-		return 0;
+		kgdb_msg_write("KGDB only knows signal 9 (pass)"
+			" and 15 (pass and disconnect)\n"
+			"Executing a continue without signal passing\n", 0);
+		remcom_in_buffer[0] = 'c';
 	}
 
 	/* Indicate fall through */
@@ -1395,6 +1410,7 @@
 	struct kgdb_state kgdb_var;
 	struct kgdb_state *ks = &kgdb_var;
 	unsigned long flags;
+	int sstep_tries = 100;
 	int error = 0;
 	int i, cpu;
 
@@ -1425,13 +1441,14 @@
 		cpu_relax();
 
 	/*
-	 * Do not start the debugger connection on this CPU if the last
-	 * instance of the exception handler wanted to come into the
-	 * debugger on a different CPU via a single step
+	 * For single stepping, try to only enter on the processor
+	 * that was single stepping.  To guard against a deadlock, the
+	 * kernel will only try for the value of sstep_tries before
+	 * giving up and continuing on.
 	 */
 	if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
-	    atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
-
+	    (kgdb_info[cpu].task &&
+	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
 		atomic_set(&kgdb_active, -1);
 		touch_softlockup_watchdog();
 		clocksource_touch_watchdog();
@@ -1524,6 +1541,13 @@
 	}
 
 kgdb_restore:
+	if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
+		int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
+		if (kgdb_info[sstep_cpu].task)
+			kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
+		else
+			kgdb_sstep_pid = 0;
+	}
 	/* Free kgdb_active */
 	atomic_set(&kgdb_active, -1);
 	touch_softlockup_watchdog();
diff --git a/kernel/resource.c b/kernel/resource.c
index fb11a58..dc15686 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -308,35 +308,37 @@
 			 void *alignf_data)
 {
 	struct resource *this = root->child;
+	resource_size_t start, end;
 
-	new->start = root->start;
+	start = root->start;
 	/*
 	 * Skip past an allocated resource that starts at 0, since the assignment
 	 * of this->start - 1 to new->end below would cause an underflow.
 	 */
 	if (this && this->start == 0) {
-		new->start = this->end + 1;
+		start = this->end + 1;
 		this = this->sibling;
 	}
 	for(;;) {
 		if (this)
-			new->end = this->start - 1;
+			end = this->start - 1;
 		else
-			new->end = root->end;
-		if (new->start < min)
-			new->start = min;
-		if (new->end > max)
-			new->end = max;
-		new->start = ALIGN(new->start, align);
+			end = root->end;
+		if (start < min)
+			start = min;
+		if (end > max)
+			end = max;
+		start = ALIGN(start, align);
 		if (alignf)
 			alignf(alignf_data, new, size, align);
-		if (new->start < new->end && new->end - new->start >= size - 1) {
-			new->end = new->start + size - 1;
+		if (start < end && end - start >= size - 1) {
+			new->start = start;
+			new->end = start + size - 1;
 			return 0;
 		}
 		if (!this)
 			break;
-		new->start = this->end + 1;
+		start = this->end + 1;
 		this = this->sibling;
 	}
 	return -EBUSY;
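
The behavioural change is that *new is only written on success, so a
failed allocation no longer clobbers the caller's resource. A hedged
caller sketch (window and alignment values are made up):

	struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	res->name = "foo";
	res->flags = IORESOURCE_MEM;
	/* 8 KB anywhere in the window, 4 KB aligned */
	ret = allocate_resource(&iomem_resource, res, 0x2000,
				0xc0000000, 0xdfffffff, 0x1000,
				NULL, NULL);
	/* on -EBUSY, res->start and res->end are now left untouched */
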
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 33bed5e..6438cd5 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -595,37 +595,89 @@
 }
 
 static char *resource_string(char *buf, char *end, struct resource *res,
-				struct printf_spec spec)
+				struct printf_spec spec, const char *fmt)
 {
 #ifndef IO_RSRC_PRINTK_SIZE
-#define IO_RSRC_PRINTK_SIZE	4
+#define IO_RSRC_PRINTK_SIZE	6
 #endif
 
 #ifndef MEM_RSRC_PRINTK_SIZE
-#define MEM_RSRC_PRINTK_SIZE	8
+#define MEM_RSRC_PRINTK_SIZE	10
 #endif
-	struct printf_spec num_spec = {
+	struct printf_spec hex_spec = {
 		.base = 16,
 		.precision = -1,
 		.flags = SPECIAL | SMALL | ZEROPAD,
 	};
-	/* room for the actual numbers, the two "0x", -, [, ] and the final zero */
-	char sym[4*sizeof(resource_size_t) + 8];
-	char *p = sym, *pend = sym + sizeof(sym);
-	int size = -1;
+	struct printf_spec dec_spec = {
+		.base = 10,
+		.precision = -1,
+		.flags = 0,
+	};
+	struct printf_spec str_spec = {
+		.field_width = -1,
+		.precision = 10,
+		.flags = LEFT,
+	};
+	struct printf_spec flag_spec = {
+		.base = 16,
+		.precision = -1,
+		.flags = SPECIAL | SMALL,
+	};
 
-	if (res->flags & IORESOURCE_IO)
+	/* 32-bit res (sizeof==4): 10 chars in dec, 10 in hex ("0x" + 8)
+	 * 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */
+#define RSRC_BUF_SIZE		((2 * sizeof(resource_size_t)) + 4)
+#define FLAG_BUF_SIZE		(2 * sizeof(res->flags))
+#define DECODED_BUF_SIZE	sizeof("[mem - 64bit pref disabled]")
+#define RAW_BUF_SIZE		sizeof("[mem - flags 0x]")
+	char sym[max(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE,
+		     2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)];
+
+	char *p = sym, *pend = sym + sizeof(sym);
+	int size = -1, addr = 0;
+	int decode = (fmt[0] == 'R') ? 1 : 0;
+
+	if (res->flags & IORESOURCE_IO) {
 		size = IO_RSRC_PRINTK_SIZE;
-	else if (res->flags & IORESOURCE_MEM)
+		addr = 1;
+	} else if (res->flags & IORESOURCE_MEM) {
 		size = MEM_RSRC_PRINTK_SIZE;
+		addr = 1;
+	}
 
 	*p++ = '[';
-	num_spec.field_width = size;
-	p = number(p, pend, res->start, num_spec);
-	*p++ = '-';
-	p = number(p, pend, res->end, num_spec);
+	if (res->flags & IORESOURCE_IO)
+		p = string(p, pend, "io  ", str_spec);
+	else if (res->flags & IORESOURCE_MEM)
+		p = string(p, pend, "mem ", str_spec);
+	else if (res->flags & IORESOURCE_IRQ)
+		p = string(p, pend, "irq ", str_spec);
+	else if (res->flags & IORESOURCE_DMA)
+		p = string(p, pend, "dma ", str_spec);
+	else {
+		p = string(p, pend, "??? ", str_spec);
+		decode = 0;
+	}
+	hex_spec.field_width = size;
+	p = number(p, pend, res->start, addr ? hex_spec : dec_spec);
+	if (res->start != res->end) {
+		*p++ = '-';
+		p = number(p, pend, res->end, addr ? hex_spec : dec_spec);
+	}
+	if (decode) {
+		if (res->flags & IORESOURCE_MEM_64)
+			p = string(p, pend, " 64bit", str_spec);
+		if (res->flags & IORESOURCE_PREFETCH)
+			p = string(p, pend, " pref", str_spec);
+		if (res->flags & IORESOURCE_DISABLED)
+			p = string(p, pend, " disabled", str_spec);
+	} else {
+		p = string(p, pend, " flags ", str_spec);
+		p = number(p, pend, res->flags, flag_spec);
+	}
 	*p++ = ']';
-	*p = 0;
+	*p = '\0';
 
 	return string(buf, end, sym, spec);
 }
@@ -801,8 +853,8 @@
  * - 'f' For simple symbolic function names without offset
  * - 'S' For symbolic direct pointers with offset
  * - 's' For symbolic direct pointers without offset
- * - 'R' For a struct resource pointer, it prints the range of
- *       addresses (not the name nor the flags)
+ * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
+ * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
  * - 'M' For a 6-byte MAC address, it prints the address in the
  *       usual colon-separated hex notation
  * - 'm' For a 6-byte MAC address, it prints the hex address without colons
@@ -833,7 +885,8 @@
 	case 'S':
 		return symbol_string(buf, end, ptr, spec, *fmt);
 	case 'R':
-		return resource_string(buf, end, ptr, spec);
+	case 'r':
+		return resource_string(buf, end, ptr, spec, fmt);
 	case 'M':			/* Colon separated: 00:01:02:03:04:05 */
 	case 'm':			/* Contiguous: 000102030405 */
 		return mac_address_string(buf, end, ptr, spec, fmt);
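
For example, the same resource can now be printed decoded or raw:

	struct resource res = {
		.start	= 0xc0000000,
		.end	= 0xc0001fff,
		.flags	= IORESOURCE_MEM | IORESOURCE_PREFETCH,
	};

	printk(KERN_DEBUG "decoded: %pR\n", &res);
	/* -> decoded: [mem 0xc0000000-0xc0001fff pref] */
	printk(KERN_DEBUG "raw:     %pr\n", &res);
	/* -> raw:     [mem 0xc0000000-0xc0001fff flags 0x1200] */
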
diff --git a/mm/mmap.c b/mm/mmap.c
index 292ddc3..ed70a68 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -931,13 +931,9 @@
 	if (!(flags & MAP_FIXED))
 		addr = round_hint_to_min(addr);
 
-	error = arch_mmap_check(addr, len, flags);
-	if (error)
-		return error;
-
 	/* Careful about overflows.. */
 	len = PAGE_ALIGN(len);
-	if (!len || len > TASK_SIZE)
+	if (!len)
 		return -ENOMEM;
 
 	/* offset overflow? */
@@ -948,24 +944,6 @@
 	if (mm->map_count > sysctl_max_map_count)
 		return -ENOMEM;
 
-	if (flags & MAP_HUGETLB) {
-		struct user_struct *user = NULL;
-		if (file)
-			return -EINVAL;
-
-		/*
-		 * VM_NORESERVE is used because the reservations will be
-		 * taken when vm_ops->mmap() is called
-		 * A dummy user value is used because we are not locking
-		 * memory so no accounting is necessary
-		 */
-		len = ALIGN(len, huge_page_size(&default_hstate));
-		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
-						&user, HUGETLB_ANONHUGE_INODE);
-		if (IS_ERR(file))
-			return PTR_ERR(file);
-	}
-
 	/* Obtain the address to map to. we verify (or select) it and ensure
 	 * that it represents a valid section of the address space.
 	 */
@@ -1455,6 +1433,14 @@
 	unsigned long (*get_area)(struct file *, unsigned long,
 				  unsigned long, unsigned long, unsigned long);
 
+	unsigned long error = arch_mmap_check(addr, len, flags);
+	if (error)
+		return error;
+
+	/* Careful about overflows.. */
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+
 	get_area = current->mm->get_unmapped_area;
 	if (file && file->f_op && file->f_op->get_unmapped_area)
 		get_area = file->f_op->get_unmapped_area;
@@ -1999,20 +1985,14 @@
 	if (!len)
 		return addr;
 
-	if ((addr + len) > TASK_SIZE || (addr + len) < addr)
-		return -EINVAL;
-
-	if (is_hugepage_only_range(mm, addr, len))
-		return -EINVAL;
-
 	error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
 	if (error)
 		return error;
 
 	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
-	error = arch_mmap_check(addr, len, flags);
-	if (error)
+	error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
+	if (error & ~PAGE_MASK)
 		return error;
 
 	/*
diff --git a/mm/mremap.c b/mm/mremap.c
index 97bff25..8451908 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -261,6 +261,137 @@
 	return new_addr;
 }
 
+static struct vm_area_struct *vma_to_resize(unsigned long addr,
+	unsigned long old_len, unsigned long new_len, unsigned long *p)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma = find_vma(mm, addr);
+
+	if (!vma || vma->vm_start > addr)
+		goto Efault;
+
+	if (is_vm_hugetlb_page(vma))
+		goto Einval;
+
+	/* We can't remap across vm area boundaries */
+	if (old_len > vma->vm_end - addr)
+		goto Efault;
+
+	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
+		if (new_len > old_len)
+			goto Efault;
+	}
+
+	if (vma->vm_flags & VM_LOCKED) {
+		unsigned long locked, lock_limit;
+		locked = mm->locked_vm << PAGE_SHIFT;
+		lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+		locked += new_len - old_len;
+		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+			goto Eagain;
+	}
+
+	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
+		goto Enomem;
+
+	if (vma->vm_flags & VM_ACCOUNT) {
+		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
+		if (security_vm_enough_memory(charged))
+			goto Efault;
+		*p = charged;
+	}
+
+	return vma;
+
+Efault:	/* very odd choice for most of the cases, but... */
+	return ERR_PTR(-EFAULT);
+Einval:
+	return ERR_PTR(-EINVAL);
+Enomem:
+	return ERR_PTR(-ENOMEM);
+Eagain:
+	return ERR_PTR(-EAGAIN);
+}
+
+static unsigned long mremap_to(unsigned long addr,
+	unsigned long old_len, unsigned long new_addr,
+	unsigned long new_len)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	unsigned long ret = -EINVAL;
+	unsigned long charged = 0;
+	unsigned long map_flags;
+
+	if (new_addr & ~PAGE_MASK)
+		goto out;
+
+	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
+		goto out;
+
+	/* Check if the location we're moving into overlaps the
+	 * old location at all, and fail if it does.
+	 */
+	if ((new_addr <= addr) && (new_addr+new_len) > addr)
+		goto out;
+
+	if ((addr <= new_addr) && (addr+old_len) > new_addr)
+		goto out;
+
+	ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
+	if (ret)
+		goto out;
+
+	ret = do_munmap(mm, new_addr, new_len);
+	if (ret)
+		goto out;
+
+	if (old_len >= new_len) {
+		ret = do_munmap(mm, addr+new_len, old_len - new_len);
+		if (ret && old_len != new_len)
+			goto out;
+		old_len = new_len;
+	}
+
+	vma = vma_to_resize(addr, old_len, new_len, &charged);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto out;
+	}
+
+	map_flags = MAP_FIXED;
+	if (vma->vm_flags & VM_MAYSHARE)
+		map_flags |= MAP_SHARED;
+
+	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
+				((addr - vma->vm_start) >> PAGE_SHIFT),
+				map_flags);
+	if (ret & ~PAGE_MASK)
+		goto out1;
+
+	ret = move_vma(vma, addr, old_len, new_len, new_addr);
+	if (!(ret & ~PAGE_MASK))
+		goto out;
+out1:
+	vm_unacct_memory(charged);
+
+out:
+	return ret;
+}
+
+static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
+{
+	unsigned long end = vma->vm_end + delta;
+	if (end < vma->vm_end) /* overflow */
+		return 0;
+	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
+		return 0;
+	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
+			      0, MAP_FIXED) & ~PAGE_MASK)
+		return 0;
+	return 1;
+}
+
 /*
  * Expand (or shrink) an existing mapping, potentially moving it at the
  * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
@@ -294,32 +425,10 @@
 	if (!new_len)
 		goto out;
 
-	/* new_addr is only valid if MREMAP_FIXED is specified */
 	if (flags & MREMAP_FIXED) {
-		if (new_addr & ~PAGE_MASK)
-			goto out;
-		if (!(flags & MREMAP_MAYMOVE))
-			goto out;
-
-		if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
-			goto out;
-
-		/* Check if the location we're moving into overlaps the
-		 * old location at all, and fail if it does.
-		 */
-		if ((new_addr <= addr) && (new_addr+new_len) > addr)
-			goto out;
-
-		if ((addr <= new_addr) && (addr+old_len) > new_addr)
-			goto out;
-
-		ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
-		if (ret)
-			goto out;
-
-		ret = do_munmap(mm, new_addr, new_len);
-		if (ret)
-			goto out;
+		if (flags & MREMAP_MAYMOVE)
+			ret = mremap_to(addr, old_len, new_addr, new_len);
+		goto out;
 	}
 
 	/*
@@ -332,60 +441,23 @@
 		if (ret && old_len != new_len)
 			goto out;
 		ret = addr;
-		if (!(flags & MREMAP_FIXED) || (new_addr == addr))
-			goto out;
-		old_len = new_len;
+		goto out;
 	}
 
 	/*
-	 * Ok, we need to grow..  or relocate.
+	 * Ok, we need to grow..
 	 */
-	ret = -EFAULT;
-	vma = find_vma(mm, addr);
-	if (!vma || vma->vm_start > addr)
+	vma = vma_to_resize(addr, old_len, new_len, &charged);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
 		goto out;
-	if (is_vm_hugetlb_page(vma)) {
-		ret = -EINVAL;
-		goto out;
-	}
-	/* We can't remap across vm area boundaries */
-	if (old_len > vma->vm_end - addr)
-		goto out;
-	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
-		if (new_len > old_len)
-			goto out;
-	}
-	if (vma->vm_flags & VM_LOCKED) {
-		unsigned long locked, lock_limit;
-		locked = mm->locked_vm << PAGE_SHIFT;
-		lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
-		locked += new_len - old_len;
-		ret = -EAGAIN;
-		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
-			goto out;
-	}
-	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT)) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	if (vma->vm_flags & VM_ACCOUNT) {
-		charged = (new_len - old_len) >> PAGE_SHIFT;
-		if (security_vm_enough_memory(charged))
-			goto out_nc;
 	}
 
 	/* old_len exactly to the end of the area..
-	 * And we're not relocating the area.
 	 */
-	if (old_len == vma->vm_end - addr &&
-	    !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
-	    (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
-		unsigned long max_addr = TASK_SIZE;
-		if (vma->vm_next)
-			max_addr = vma->vm_next->vm_start;
+	if (old_len == vma->vm_end - addr) {
 		/* can we just expand the current mapping? */
-		if (max_addr - addr >= new_len) {
+		if (vma_expandable(vma, new_len - old_len)) {
 			int pages = (new_len - old_len) >> PAGE_SHIFT;
 
 			vma_adjust(vma, vma->vm_start,
@@ -409,28 +481,27 @@
 	 */
 	ret = -ENOMEM;
 	if (flags & MREMAP_MAYMOVE) {
-		if (!(flags & MREMAP_FIXED)) {
-			unsigned long map_flags = 0;
-			if (vma->vm_flags & VM_MAYSHARE)
-				map_flags |= MAP_SHARED;
+		unsigned long map_flags = 0;
+		if (vma->vm_flags & VM_MAYSHARE)
+			map_flags |= MAP_SHARED;
 
-			new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
-						vma->vm_pgoff, map_flags);
-			if (new_addr & ~PAGE_MASK) {
-				ret = new_addr;
-				goto out;
-			}
-
-			ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
-			if (ret)
-				goto out;
+		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
+					vma->vm_pgoff +
+					((addr - vma->vm_start) >> PAGE_SHIFT),
+					map_flags);
+		if (new_addr & ~PAGE_MASK) {
+			ret = new_addr;
+			goto out;
 		}
+
+		ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
+		if (ret)
+			goto out;
 		ret = move_vma(vma, addr, old_len, new_len, new_addr);
 	}
 out:
 	if (ret & ~PAGE_MASK)
 		vm_unacct_memory(charged);
-out_nc:
 	return ret;
 }
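
From user space the consolidated MREMAP_FIXED path is reached exactly as
before; a hedged sketch:

	#define _GNU_SOURCE
	#include <sys/mman.h>

	/* move/resize old_len bytes at old to the fixed address target */
	static void *fixed_move(void *old, size_t old_len, size_t new_len,
				void *target)
	{
		/* MREMAP_FIXED requires MREMAP_MAYMOVE, as enforced above */
		return mremap(old, old_len, new_len,
			      MREMAP_MAYMOVE | MREMAP_FIXED, target);
	}
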
 
diff --git a/mm/util.c b/mm/util.c
index 7c35ad9..b377ce4 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -4,6 +4,10 @@
 #include <linux/module.h>
 #include <linux/err.h>
 #include <linux/sched.h>
+#include <linux/hugetlb.h>
+#include <linux/syscalls.h>
+#include <linux/mman.h>
+#include <linux/file.h>
 #include <asm/uaccess.h>
 
 #define CREATE_TRACE_POINTS
@@ -268,6 +272,46 @@
 }
 EXPORT_SYMBOL_GPL(get_user_pages_fast);
 
+SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
+		unsigned long, prot, unsigned long, flags,
+		unsigned long, fd, unsigned long, pgoff)
+{
+	struct file * file = NULL;
+	unsigned long retval = -EBADF;
+
+	if (!(flags & MAP_ANONYMOUS)) {
+		if (unlikely(flags & MAP_HUGETLB))
+			return -EINVAL;
+		file = fget(fd);
+		if (!file)
+			goto out;
+	} else if (flags & MAP_HUGETLB) {
+		struct user_struct *user = NULL;
+		/*
+		 * VM_NORESERVE is used because the reservations will be
+		 * taken when vm_ops->mmap() is called
+		 * A dummy user value is used because we are not locking
+		 * memory so no accounting is necessary
+		 */
+		len = ALIGN(len, huge_page_size(&default_hstate));
+		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
+						&user, HUGETLB_ANONHUGE_INODE);
+		if (IS_ERR(file))
+			return PTR_ERR(file);
+	}
+
+	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+	down_write(&current->mm->mmap_sem);
+	retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+	up_write(&current->mm->mmap_sem);
+
+	if (file)
+		fput(file);
+out:
+	return retval;
+}
+
 /* Tracepoints definitions. */
 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
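
With the hugetlb handling moved here, the anonymous-hugepage case is
driven from user space by a plain mmap() call; a hedged sketch (needs
hugepages reserved via /proc/sys/vm/nr_hugepages, and MAP_HUGETLB from
recent headers):

	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 4UL * 1024 * 1024;	/* illustrative size */
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
			       -1, 0);

		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		munmap(p, len);
		return 0;
	}
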
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 62a9025..6f426af 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -104,7 +104,7 @@
 static void do_usb_entry(struct usb_device_id *id,
 			 unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
 			 unsigned char range_lo, unsigned char range_hi,
-			 struct module *mod)
+			 unsigned char max, struct module *mod)
 {
 	char alias[500];
 	strcpy(alias, "usb:");
@@ -118,9 +118,22 @@
 		sprintf(alias + strlen(alias), "%0*X",
 			bcdDevice_initial_digits, bcdDevice_initial);
 	if (range_lo == range_hi)
-		sprintf(alias + strlen(alias), "%u", range_lo);
-	else if (range_lo > 0 || range_hi < 9)
-		sprintf(alias + strlen(alias), "[%u-%u]", range_lo, range_hi);
+		sprintf(alias + strlen(alias), "%X", range_lo);
+	else if (range_lo > 0 || range_hi < max) {
+		if (range_lo > 0x9 || range_hi < 0xA)
+			sprintf(alias + strlen(alias),
+				"[%X-%X]",
+				range_lo,
+				range_hi);
+		else {
+			sprintf(alias + strlen(alias),
+				range_lo < 0x9 ? "[%X-9" : "[%X",
+				range_lo);
+			sprintf(alias + strlen(alias),
+				range_hi > 0xA ? "A-%X]" : "%X]",
+				range_hi);
+		}
+	}
 	if (bcdDevice_initial_digits < (sizeof(id->bcdDevice_lo) * 2 - 1))
 		strcat(alias, "*");
 
@@ -147,10 +160,49 @@
 		   "MODULE_ALIAS(\"%s\");\n", alias);
 }
 
+/* Handles increment/decrement of BCD formatted integers */
+/* Returns the previous value, so it works like i++ or i-- */
+static unsigned int incbcd(unsigned int *bcd,
+			   int inc,
+			   unsigned char max,
+			   size_t chars)
+{
+	unsigned int init = *bcd, i, j;
+	unsigned long long c, dec = 0;
+
+	/* If bcd is not in BCD format, just increment */
+	if (max > 0x9) {
+		*bcd += inc;
+		return init;
+	}
+
+	/* Convert BCD to Decimal */
+	for (i=0 ; i < chars ; i++) {
+		c = (*bcd >> (i << 2)) & 0xf;
+		c = c > 9 ? 9 : c; /* force to bcd just in case */
+		for (j=0 ; j < i ; j++)
+			c = c * 10;
+		dec += c;
+	}
+
+	/* Do our increment/decrement */
+	dec += inc;
+	*bcd  = 0;
+
+	/* Convert back to BCD */
+	for (i=0 ; i < chars ; i++) {
+		for (c=1,j=0 ; j < i ; j++)
+			c = c * 10;
+		c = (dec / c) % 10;
+		*bcd += c << (i << 2);
+	}
+	return init;
+}
+
 static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
 {
 	unsigned int devlo, devhi;
-	unsigned char chi, clo;
+	unsigned char chi, clo, max;
 	int ndigits;
 
 	id->match_flags = TO_NATIVE(id->match_flags);
@@ -162,6 +214,17 @@
 	devhi = id->match_flags & USB_DEVICE_ID_MATCH_DEV_HI ?
 		TO_NATIVE(id->bcdDevice_hi) : ~0x0U;
 
+	/* Figure out if this entry is in bcd or hex format */
+	max = 0x9; /* Default to decimal format */
+	for (ndigits = 0 ; ndigits < sizeof(id->bcdDevice_lo) * 2 ; ndigits++) {
+		clo = (devlo >> (ndigits << 2)) & 0xf;
+		chi = ((devhi > 0x9999 ? 0x9999 : devhi) >> (ndigits << 2)) & 0xf;
+		if (clo > max || chi > max) {
+			max = 0xf;
+			break;
+		}
+	}
+
 	/*
 	 * Some modules (visor) have empty slots as placeholder for
 	 * run-time specification that results in catch-all alias
@@ -173,21 +236,27 @@
 	for (ndigits = sizeof(id->bcdDevice_lo) * 2 - 1; devlo <= devhi; ndigits--) {
 		clo = devlo & 0xf;
 		chi = devhi & 0xf;
-		if (chi > 9)	/* it's bcd not hex */
-			chi = 9;
+		if (chi > max)	/* If we are in bcd mode, truncate if necessary */
+			chi = max;
 		devlo >>= 4;
 		devhi >>= 4;
 
 		if (devlo == devhi || !ndigits) {
-			do_usb_entry(id, devlo, ndigits, clo, chi, mod);
+			do_usb_entry(id, devlo, ndigits, clo, chi, max, mod);
 			break;
 		}
 
-		if (clo > 0)
-			do_usb_entry(id, devlo++, ndigits, clo, 9, mod);
+		if (clo > 0x0)
+			do_usb_entry(id,
+				     incbcd(&devlo, 1, max,
+					    sizeof(id->bcdDevice_lo) * 2),
+				     ndigits, clo, max, max, mod);
 
-		if (chi < 9)
-			do_usb_entry(id, devhi--, ndigits, 0, chi, mod);
+		if (chi < max)
+			do_usb_entry(id,
+				     incbcd(&devhi, -1, max,
+					    sizeof(id->bcdDevice_lo) * 2),
+				     ndigits, 0x0, chi, max, mod);
 	}
 }
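
To make the BCD arithmetic concrete, a small worked example of what
incbcd() computes (a fragment assuming the function above; values
chosen to show the decimal carry):

	unsigned int v, prev;

	v = 0x0999;			/* BCD for decimal 999 */
	prev = incbcd(&v, 1, 0x9, 4);
	/* prev == 0x0999, v == 0x1000: 999 + 1 = 1000, re-encoded as BCD */

	v = 0x0150;
	prev = incbcd(&v, -1, 0x9, 4);
	/* prev == 0x0150, v == 0x0149: decimal borrow across the digit */
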